source
stringlengths
3
92
c
stringlengths
26
2.25M
mlp_openmp.c
/** * @file app.c * @brief Template for a Host Application Source File. * */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <assert.h> #include <stdint.h> #include "../../support/timer.h" #include "../../support/common.h" T** A; T* B; T* C; // Create input arrays static void init_data(T** A, T* B, unsigned int m_size, unsigned int n_size){ for (unsigned int l = 0; l < NUM_LAYERS; l++) for (unsigned int i = 0; i < m_size * n_size; i++){ if(i % 100 < 98){ A[l][i] = 0; }else{ A[l][i] = (l+i) % 2; } } for (unsigned int i = 0; i < n_size; i++){ if(i % 50 < 48){ B[i] = 0; } else{ B[i] = i % 2; } } } // Compute output in the host static void mlp_host(T* C, T** A, T* B, unsigned int m_size, unsigned int n_size) { for (unsigned int nl = 0; nl < NUM_LAYERS; nl++){ for (unsigned int m = 0; m < m_size; m++){ C[m] = 0; } #pragma omp parallel for for (unsigned int m = 0; m < m_size; m++){ for (unsigned int n = 0; n < n_size; n++){ C[m] += A[nl][m * n_size + n] * B[n]; } C[m] = max(0, C[m]); } for (unsigned int n = 0; n < n_size; n++){ B[n] = C[n]; } } } static uint64_t mlp_host_sum(uint64_t n_size, uint64_t m_size) { uint64_t sum = 0; for (uint64_t m = 0; m < n_size; m++){ sum += B[m]; } return sum; } // Params --------------------------------------------------------------------- typedef struct Params { char* dpu_type; int nr_of_ranks; int input_size_n; int input_size_m; int n_warmup; int n_reps; }Params; void usage() { fprintf(stderr, "\nUsage: ./program [options]" "\n" "\nGeneral options:" "\n -h help" "\n -d <D> DPU type (default=fsim)" "\n -r <R> # of ranks (default=2)" "\n" "\nBenchmark-specific options:" "\n -i <I> input size (default=8M elements)" "\n"); } struct Params input_params(int argc, char **argv) { struct Params p; p.dpu_type = "fsim"; p.nr_of_ranks = 1; p.input_size_n = 1 << 9; p.input_size_m = 1 << 9; p.n_warmup = 2; p.n_reps = 3; int opt; while((opt = getopt(argc, argv, 
"hd:r:i:")) >= 0) { switch(opt) { case 'h': usage(); exit(0); break; case 'd': p.dpu_type = optarg; break; case 'r': p.nr_of_ranks = atoi(optarg); break; case 'n': p.input_size_n = atoi(optarg); break; case 'm': p.input_size_m = atoi(optarg); break; default: fprintf(stderr, "\nUnrecognized option!\n"); usage(); exit(0); } } assert(p.nr_of_ranks > 0 && "Invalid # of ranks!"); return p; } /** * @brief Main of the Host Application. */ int main(int argc, char **argv) { struct Params p = input_params(argc, argv); uint64_t n_size = 8192; uint64_t m_size = 20480; Timer timer; A = malloc(NUM_LAYERS * sizeof(T*)); for(int l = 0; l < NUM_LAYERS; l++) A[l] = malloc(n_size*m_size*sizeof(unsigned int)); B = malloc(m_size*sizeof(unsigned int)); C = malloc(m_size*sizeof(unsigned int)); // Create an input file with arbitrary data. init_data(A, B, m_size, n_size); start(&timer, 0, 1); mlp_host(C, A, B, n_size, m_size); stop(&timer, 0); uint32_t sum = mlp_host_sum(n_size, m_size); printf("Kernel "); print(&timer, 0, 1); printf("\n"); printf("SUM = %d \n", sum); for(int l = 0; l < NUM_LAYERS; l++) free(A[l]); free(A); free(B); free(C); return 0; }
mandelbrot_omp.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> // USAGE: mandelbrot_omp <rows> <cols> <x0> <y0> <dx> <dy> // OUTPUT: PERFORMANCE IN TIME SPENT #define TRIALS 2 #define OMP_CHUNK_SIZE 100 double get_time() { struct timespec tt; clock_gettime(CLOCK_REALTIME, &tt); double t = (double)tt.tv_sec * 1.0e9 + (double)tt.tv_nsec; return t; } int main(int argc, char **argv) { FILE *fp; int rows, cols, size, i, j, k, max_iteration, *grid; double ttot, tstart, tend, tmin; char filename[] = "results/mandelbrot_omp.dat"; if (argc < 3) { printf("Usage: mandelbrot_omp cols rows\n"); return 1; } cols = atoi(argv[1]); rows = atoi(argv[2]); size = rows * cols; if (rows < 2 || cols < 2) { printf("Error: cols and rows must be > 2\n"); return 1; } max_iteration = 100; double xmin = argc > 3 ? atof(argv[3]) : -2.5; double ymin = argc > 4 ? atof(argv[4]) : -1; double xmax = argc > 5 ? xmin + atof(argv[5]) : 1; double ymax = argc > 6 ? ymin + atof(argv[6]) : 1; if (xmin >= xmax || ymin >= ymax) { printf("Usage: mandelbrot_mpi cols rows x0=-2.5 y0=-1 dx=-1 dy=1\n"); return 1; } grid = (int *)malloc(size * sizeof(int)); for (k = 0; k < TRIALS; k++) { tmin = 10e10; tstart = get_time(); #pragma omp parallel for schedule(static, OMP_CHUNK_SIZE) for (i = 0; i < size; i++) { int px = i % rows; int py = i / rows; double x0 = (double)px / (rows - 1) * (xmax - xmin) + xmin; double y0 = (double)py / (cols - 1) * (ymax - ymin) + ymin; double x = 0; double y = 0; int iteration = 0; while (x * x + y * y < 2 * 2 && iteration < max_iteration) { double xtemp = x * x - y * y + x0; y = 2 * x * y + y0; x = xtemp; iteration++; } grid[i] = iteration; } tend = get_time(); ttot = tend - tstart; if (ttot < tmin) tmin = ttot; } printf("%.2lf\n", tmin / 10e6); fp = fopen(filename, "w"); fprintf(fp, "%.2lf %.2lf %.2lf %.2lf\n", xmin, ymin, xmax - xmin, ymax - ymin); for (i = 0; i < cols; i++) { for (j = 0; j < rows; j++) { fprintf(fp, "%i ", grid[rows * i + j]); } fprintf(fp, "\n"); } 
fclose(fp); free(grid); return 0; }
singleModificado.c
/*
 * Demo of #pragma omp single: exactly one thread reads the initialization
 * value, then all threads share the worksharing loop that fills the array.
 *
 * $ gcc -fopenmp -O2 src/single.c -o bin/single
 * $ ./bin/single
 * Introduce valor de inicialización a: 1
 * Single ejecutada por el thread 0
 * b[0] = 1  b[1] = 1  ...  b[8] = 1
 */
#include <stdio.h>
#include <omp.h>

/* FIX: implicit-int main() is invalid since C99. */
int main(void) {
  int n = 9, i, a = 0, b[9];

  for (i = 0; i < n; i++)
    b[i] = -1;

#pragma omp parallel
  {
    /* Executed by one thread only; the implicit barrier at the end of the
     * single construct makes 'a' visible to every thread before the
     * worksharing loop below reads it. */
#pragma omp single
    {
      printf("Introduce valor de inicialización a: ");
      /* FIX: on malformed input 'a' used to be read uninitialized (UB);
       * fall back to the zero it was initialized with. */
      if (scanf("%d", &a) != 1)
        a = 0;
      printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
    }
#pragma omp for
    for (i = 0; i < n; i++) {
      b[i] = a;
      printf("b[%d] = %d\t", i, a);
    }
  }
  printf("\n");
  return 0;
}
convolution.h
/* Implicitly dealiased convolution routines.
   Copyright (C) 2010-2015 John C. Bowman and Malcolm Roberts, Univ. of Alberta

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "Complex.h"
#include "fftw++.h"
#include "cmult-sse2.h"
#include "transposeoptions.h"

namespace fftwpp {

#ifndef __convolution_h__
#define __convolution_h__ 1

// Constants defined elsewhere in the library (values not visible here).
extern const double sqrt3;
extern const double hsqrt3;

extern const Complex hSqrt3;
extern const Complex mhsqrt3;
extern const Complex mhalf;
extern const Complex zeta3;

// Unsigned minimum/maximum helpers used throughout this header.
inline unsigned int min(unsigned int a, unsigned int b)
{
  return (a < b) ? a : b;
}

inline unsigned int max(unsigned int a, unsigned int b)
{
  return (a > b) ? a : b;
}

// Build the factored zeta tables.
unsigned int BuildZeta(unsigned int n, unsigned int m,
                       Complex *&ZetaH, Complex *&ZetaL,
                       unsigned int threads=1);

// Option bundle threaded through the n-dimensional convolution classes.
struct convolveOptions {
  unsigned int nx,ny,nz;        // |
  unsigned int stride2,stride3; // | Used internally by the MPI interface.
  utils::mpiOptions mpi;        // |
  bool toplevel;                // true when this instance owns the index array

  convolveOptions(unsigned int nx, unsigned int ny, unsigned int nz,
                  unsigned int stride2, unsigned int stride3) :
    nx(nx), ny(ny), nz(nz), stride2(stride2), stride3(stride3),
    toplevel(true) {}

  convolveOptions(unsigned int nx, unsigned int ny, unsigned int stride2,
                  utils::mpiOptions mpi, bool toplevel=true) :
    nx(nx), ny(ny), stride2(stride2), mpi(mpi), toplevel(toplevel) {}

  convolveOptions(unsigned int ny, unsigned int nz,
                  unsigned int stride2, unsigned int stride3,
                  utils::mpiOptions mpi, bool toplevel=true) :
    ny(ny), nz(nz), stride2(stride2), stride3(stride3), mpi(mpi),
    toplevel(toplevel) {}

  convolveOptions(bool toplevel=true) : nx(0), ny(0), nz(0),
                                        toplevel(toplevel) {}
};

static const convolveOptions defaultconvolveOptions;

// Signature of a pointwise complex multiplication kernel applied to the
// transformed data blocks.
typedef void multiplier(Complex **, unsigned int m,
                        const unsigned int indexsize,
                        const unsigned int *index,
                        unsigned int r, unsigned int threads);
// Same, for real (Hermitian) data.
typedef void realmultiplier(double **, unsigned int m,
                            const unsigned int indexsize,
                            const unsigned int *index,
                            unsigned int r, unsigned int threads);

// Multipliers for binary convolutions.
multiplier multautoconvolution; multiplier multautocorrelation; multiplier multbinary; multiplier multcorrelation; multiplier multbinary2; multiplier multbinary3; multiplier multbinary4; multiplier multbinary8; realmultiplier multbinary; realmultiplier multbinary2; realmultiplier multadvection2; struct general {}; struct pretransform1 {}; struct pretransform2 {}; struct pretransform3 {}; struct pretransform4 {}; // In-place implicitly dealiased 1D complex convolution using // function pointers for multiplication class ImplicitConvolution : public ThreadBase { private: unsigned int m; Complex **U; unsigned int A; unsigned int B; Complex *u; unsigned int s; Complex *ZetaH, *ZetaL; fft1d *BackwardsO,*ForwardsO; fft1d *Backwards,*Forwards; bool pointers; bool allocated; unsigned int indexsize; public: unsigned int *index; void initpointers(Complex **&U, Complex *u) { unsigned int C=max(A,B); U=new Complex *[C]; for(unsigned int a=0; a < C; ++a) U[a]=u+a*m; pointers=true; } void deletepointers(Complex **&U) { delete [] U; } void allocateindex(unsigned int n, unsigned int *i) { indexsize=n; index=i; } void init() { indexsize=0; Complex* U0=U[0]; Complex* U1=A == 1 ? utils::ComplexAlign(m) : U[1]; BackwardsO=new fft1d(m,1,U0,U1); ForwardsO=new fft1d(m,-1,U0,U1); threads=std::min(threads,max(BackwardsO->Threads(),ForwardsO->Threads())); if(A == B) { Backwards=new fft1d(m,1,U0); threads=std::min(threads,Backwards->Threads()); } if(A <= B) { Forwards=new fft1d(m,-1,U0); threads=std::min(threads,Forwards->Threads()); } if(A == 1) utils::deleteAlign(U1); s=BuildZeta(2*m,m,ZetaH,ZetaL,threads); } // m is the number of Complex data values. // U is an array of C distinct work arrays each of size m, where C=max(A,B) // A is the number of inputs. // B is the number of outputs. 
ImplicitConvolution(unsigned int m, Complex **U, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), U(U), A(A), B(B), pointers(false), allocated(false) { init(); } // m is the number of Complex data values. // u is a work array of C*m Complex values. // A is the number of inputs. // B is the number of outputs. ImplicitConvolution(unsigned int m, Complex *u, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), A(A), B(B), u(u), allocated(false) { initpointers(U,u); init(); } // m is the number of Complex data values. // A is the number of inputs. // B is the number of outputs. ImplicitConvolution(unsigned int m, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), A(A), B(B), allocated(true) { u=utils::ComplexAlign(max(A,B)*m); initpointers(U,u); init(); } ~ImplicitConvolution() { utils::deleteAlign(ZetaH); utils::deleteAlign(ZetaL); if(pointers) deletepointers(U); if(allocated) utils::deleteAlign(u); if(A == B) delete Backwards; if(A <= B) delete Forwards; delete ForwardsO; delete BackwardsO; } // F is an array of A pointers to distinct data blocks each of size m, // shifted by offset (contents not preserved). 
void convolve(Complex **F, multiplier *pmult, unsigned int i=0, unsigned int offset=0); void autoconvolve(Complex *f) { Complex *F[]={f}; convolve(F,multautoconvolution); } void autocorrelate(Complex *f) { Complex *F[]={f}; convolve(F,multautocorrelation); } // Binary convolution: void convolve(Complex *f, Complex *g) { Complex *F[]={f,g}; convolve(F,multbinary); } // Binary correlation: void correlate(Complex *f, Complex *g) { Complex *F[]={f,g}; convolve(F,multcorrelation); } template<class T> inline void pretransform(Complex **F, unsigned int k, Vec& Zetak); template<class T> void pretransform(Complex **F); void posttransform(Complex *f, Complex *u); }; // In-place implicitly dealiased 1D Hermitian convolution. class ImplicitHConvolution : public ThreadBase { protected: unsigned int m; unsigned int c; bool compact; Complex **U; unsigned int A; unsigned int B; Complex *u; unsigned int s; Complex *ZetaH,*ZetaL; rcfft1d *rc,*rco,*rcO; crfft1d *cr,*cro,*crO; Complex *w; // Work array of size max(A,B) to hold f[c] in even case. bool pointers; bool allocated; bool even; unsigned int indexsize; public: unsigned int *index; void initpointers(Complex **&U, Complex *u) { unsigned int C=max(A,B); U=new Complex *[C]; unsigned stride=c+1; for(unsigned int a=0; a < C; ++a) U[a]=u+a*stride; pointers=true; } void deletepointers(Complex **&U) { delete [] U; } void allocateindex(unsigned int n, unsigned int *i) { indexsize=n; index=i; } void init() { even=m == 2*c; indexsize=0; Complex* U0=U[0]; rc=new rcfft1d(m,U0); cr=new crfft1d(m,U0); Complex* U1=A == 1 ? utils::ComplexAlign(m) : U[1]; rco=new rcfft1d(m,(double *) U0,U1); cro=new crfft1d(m,U1,(double *) U0); if(A == 1) utils::deleteAlign(U1); if(A != B) { rcO=rco; crO=cro; } else { rcO=rc; crO=cr; } threads=std::min(threads,std::max(rco->Threads(),cro->Threads())); s=BuildZeta(3*m,c+2,ZetaH,ZetaL,threads); w=even ? 
utils::ComplexAlign(max(A,B)) : u; } // m is the number of independent data values // U is an array of max(A,B) distinct work arrays of size c+1, where c=m/2 // A is the number of inputs. // B is the number of outputs. ImplicitHConvolution(unsigned int m, Complex **U, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(true), U(U), A(A), B(B), pointers(false), allocated(false) { init(); } ImplicitHConvolution(unsigned int m, bool compact, Complex **U, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(compact), U(U), A(A), B(B), pointers(false), allocated(false) { init(); } // m is the number of independent data values // u is a work array of max(A,B)*(c+1) Complex values, where c=m/2 // A is the number of inputs. // B is the number of outputs. ImplicitHConvolution(unsigned int m, Complex *u, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(true), A(A), B(B), u(u), allocated(false) { initpointers(U,u); init(); } ImplicitHConvolution(unsigned int m, bool compact, Complex *u, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(compact), A(A), B(B), u(u), allocated(false) { initpointers(U,u); init(); } // m is the number of independent data values // u is a work array of max(A,B)*(c+1) Complex values, where c=m/2 // A is the number of inputs. // B is the number of outputs. 
ImplicitHConvolution(unsigned int m, bool compact=true, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), m(m), c(m/2), compact(compact), A(A), B(B), u(utils::ComplexAlign(max(A,B)*(c+1))), allocated(true) { initpointers(U,u); init(); } virtual ~ImplicitHConvolution() { if(even) utils::deleteAlign(w); utils::deleteAlign(ZetaH); utils::deleteAlign(ZetaL); if(pointers) deletepointers(U); if(allocated) utils::deleteAlign(u); if(A != B) { delete cro; delete rco; } delete cr; delete rc; } // F is an array of A pointers to distinct data blocks each of size m, // shifted by offset (contents not preserved). void convolve(Complex **F, realmultiplier *pmult, unsigned int i=0, unsigned int offset=0); void pretransform(Complex *F, Complex *f1c, Complex *U); void posttransform(Complex *F, const Complex& f1c, Complex *U); // Binary convolution: void convolve(Complex *f, Complex *g) { Complex *F[]={f,g}; convolve(F,multbinary); } }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length m. // The arrays in and out (which may coincide), along with the array u, must // be allocated as Complex[M*m]. // // fftpad fft(m,M,stride); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
// class fftpad { unsigned int m; unsigned int M; unsigned int stride; unsigned int dist; unsigned int s; Complex *ZetaH, *ZetaL; unsigned int threads; public: mfft1d *Backwards; mfft1d *Forwards; fftpad(unsigned int m, unsigned int M, unsigned int stride, Complex *u=NULL, unsigned int Threads=fftw::maxthreads) : m(m), M(M), stride(stride), threads(Threads) { Backwards=new mfft1d(m,1,M,stride,1,u,NULL,threads); Forwards=new mfft1d(m,-1,M,stride,1,u,NULL,threads); threads=std::max(Backwards->Threads(),Forwards->Threads()); s=BuildZeta(2*m,m,ZetaH,ZetaL,threads); } ~fftpad() { utils::deleteAlign(ZetaL); utils::deleteAlign(ZetaH); delete Forwards; delete Backwards; } void expand(Complex *f, Complex *u); void reduce(Complex *f, Complex *u); void backwards(Complex *f, Complex *u); void forwards(Complex *f, Complex *u); }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length 2m-1 with the origin at index m-1, // containing physical data for wavenumbers -m+1 to m-1. // The arrays in and out (which may coincide) must be allocated as // Complex[M*(2m-1)]. The array u must be allocated as Complex[M*(m+1)]. // // fft0pad fft(m,M,stride,u); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
// class fft0pad { protected: unsigned int m; unsigned int M; unsigned int s; unsigned int stride; Complex *ZetaH, *ZetaL; unsigned int threads; public: mfft1d *Forwards; mfft1d *Backwards; fft0pad(unsigned int m, unsigned int M, unsigned int stride, Complex *u=NULL, unsigned int Threads=fftw::maxthreads) : m(m), M(M), stride(stride), threads(Threads) { Backwards=new mfft1d(m,1,M,stride,1,u,NULL,threads); Forwards=new mfft1d(m,-1,M,stride,1,u,NULL,threads); s=BuildZeta(3*m,m,ZetaH,ZetaL); } virtual ~fft0pad() { utils::deleteAlign(ZetaL); utils::deleteAlign(ZetaH); delete Forwards; delete Backwards; } // Unscramble indices, returning spatial index stored at position i inline static unsigned findex(unsigned i, unsigned int m) { return i < m-1 ? 3*i : 3*i+4-3*m; // for i >= m-1: j=3*(i-(m-1))+1 } inline static unsigned uindex(unsigned i, unsigned int m) { return i > 0 ? (i < m ? 3*i-1 : 3*m-3) : 3*m-1; } virtual void expand(Complex *f, Complex *u); virtual void reduce(Complex *f, Complex *u); void backwards(Complex *f, Complex *u); virtual void forwards(Complex *f, Complex *u); virtual void Backwards1(Complex *f, Complex *u); virtual void Forwards0(Complex *f); virtual void Forwards1(Complex *f, Complex *u); }; // Compute the scrambled implicitly m-padded complex Fourier transform of M // complex vectors, each of length 2m with the origin at index m, // corresponding to wavenumbers -m to m-1. // The arrays in and out (which may coincide) must be allocated as // Complex[M*2m]. The array u must be allocated as Complex[M*m]. // // fft1pad fft(m,M,stride,u); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. 
// class fft1pad : public fft0pad { public: fft1pad(unsigned int m, unsigned int M, unsigned int stride, Complex *u=NULL, unsigned int threads=fftw::maxthreads) : fft0pad(m,M,stride,u,threads) {} // Unscramble indices, returning spatial index stored at position i inline static unsigned findex(unsigned i, unsigned int m) { return i < m ? 3*i : 3*(i-m)+1; } inline static unsigned uindex(unsigned i, unsigned int m) { return i > 0 ? 3*i-1 : 3*m-1; } void expand(Complex *f, Complex *u); void reduce(Complex *f, Complex *u); void forwards(Complex *f, Complex *u); void Backwards1(Complex *f, Complex *u); void Forwards0(Complex *f); void Forwards1(Complex *f, Complex *u); }; // In-place implicitly dealiased 2D complex convolution. class ImplicitConvolution2 : public ThreadBase { protected: unsigned int mx,my; Complex *u1; Complex *u2; unsigned int A,B; fftpad *xfftpad; ImplicitConvolution **yconvolve; Complex **U2; bool allocated; unsigned int indexsize; bool toplevel; public: unsigned int *index; void initpointers2(Complex **&U2, Complex *u2, unsigned int stride) { U2=new Complex *[A]; for(unsigned int a=0; a < A; ++a) U2[a]=u2+a*stride; if(toplevel) allocateindex(1,new unsigned int[1]); } void deletepointers2(Complex **&U2) { if(toplevel) { delete [] index; for(unsigned int t=1; t < threads; ++t) delete [] yconvolve[t]->index; } delete [] U2; } void allocateindex(unsigned int n, unsigned int *i) { indexsize=n; index=i; yconvolve[0]->allocateindex(n,i); for(unsigned int t=1; t < threads; ++t) yconvolve[t]->allocateindex(n,new unsigned int[n]); } void init(const convolveOptions& options) { toplevel=options.toplevel; xfftpad=new fftpad(mx,options.ny,options.ny,u2,threads); unsigned int C=max(A,B); yconvolve=new ImplicitConvolution*[threads]; for(unsigned int t=0; t < threads; ++t) yconvolve[t]=new ImplicitConvolution(my,u1+t*my*C,A,B,innerthreads); initpointers2(U2,u2,options.stride2); } void set(convolveOptions& options) { if(options.nx == 0) options.nx=mx; if(options.ny == 
0) { options.ny=my; options.stride2=mx*my; } } // u1 is a temporary array of size my*C*threads. // u2 is a temporary array of size mx*my*C. // A is the number of inputs. // B is the number of outputs. // Here C=max(A,B). ImplicitConvolution2(unsigned int mx, unsigned int my, Complex *u1, Complex *u2, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads, convolveOptions options=defaultconvolveOptions) : ThreadBase(threads), mx(mx), my(my), u1(u1), u2(u2), A(A), B(B), allocated(false) { set(options); multithread(options.nx); init(options); } ImplicitConvolution2(unsigned int mx, unsigned int my, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads, convolveOptions options=defaultconvolveOptions) : ThreadBase(threads), mx(mx), my(my), A(A), B(B), allocated(true) { set(options); multithread(options.nx); unsigned int C=max(A,B); u1=utils::ComplexAlign(my*C*threads); u2=utils::ComplexAlign(options.stride2*C); init(options); } virtual ~ImplicitConvolution2() { deletepointers2(U2); for(unsigned int t=0; t < threads; ++t) delete yconvolve[t]; delete [] yconvolve; delete xfftpad; if(allocated) { utils::deleteAlign(u2); utils::deleteAlign(u1); } } void backwards(Complex **F, Complex **U2, unsigned int offset) { for(unsigned int a=0; a < A; ++a) xfftpad->backwards(F[a]+offset,U2[a]); } void subconvolution(Complex **F, multiplier *pmult, unsigned int r, unsigned int M, unsigned int stride, unsigned int offset=0) { if(threads > 1) { #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < M; ++i) yconvolve[get_thread_num()]->convolve(F,pmult,2*i+r,offset+i*stride); } else { ImplicitConvolution *yconvolve0=yconvolve[0]; for(unsigned int i=0; i < M; ++i) yconvolve0->convolve(F,pmult,2*i+r,offset+i*stride); } } void forwards(Complex **F, Complex **U2, unsigned int offset) { for(unsigned int b=0; b < B; ++b) xfftpad->forwards(F[b]+offset,U2[b]); } // F is a pointer to A distinct data blocks 
each of size mx*my, // shifted by offset (contents not preserved). virtual void convolve(Complex **F, multiplier *pmult, unsigned int i=0, unsigned int offset=0) { if(!toplevel) { index[indexsize-2]=i; if(threads > 1) { for(unsigned int t=1; t < threads; ++t) { unsigned int *Index=yconvolve[t]->index; for(unsigned int i=0; i < indexsize; ++i) Index[i]=index[i]; } } } backwards(F,U2,offset); subconvolution(F,pmult,0,mx,my,offset); subconvolution(U2,pmult,1,mx,my); forwards(F,U2,offset); } // Binary convolution: void convolve(Complex *f, Complex *g) { Complex *F[]={f,g}; convolve(F,multbinary); } // Binary correlation: void correlate(Complex *f, Complex *g) { Complex *F[]={f, g}; convolve(F,multcorrelation); } void autoconvolve(Complex *f) { Complex *F[]={f}; convolve(F,multautoconvolution); } void autocorrelate(Complex *f) { Complex *F[]={f}; convolve(F,multautocorrelation); } }; inline void HermitianSymmetrizeX(unsigned int mx, unsigned int my, unsigned int xorigin, Complex *f) { unsigned int offset=xorigin*my; unsigned int stop=mx*my; f[offset].im=0.0; for(unsigned int i=my; i < stop; i += my) f[offset-i]=conj(f[offset+i]); } // Enforce 3D Hermiticity using specified (x,y > 0,z=0) and (x >= 0,y=0,z=0) // data. 
inline void HermitianSymmetrizeXY(unsigned int mx, unsigned int my, unsigned int mz, unsigned int xorigin, unsigned int yorigin, Complex *f, unsigned int threads=fftw::maxthreads) { int stride=(yorigin+my)*mz; int mxstride=mx*stride; unsigned int myz=my*mz; unsigned int origin=xorigin*stride+yorigin*mz; f[origin].im=0.0; for(int i=stride; i < mxstride; i += stride) f[origin-i]=conj(f[origin+i]); PARALLEL( for(int i=stride-mxstride; i < mxstride; i += stride) { int stop=i+myz; for(int j=i+mz; j < stop; j += mz) { f[origin-j]=conj(f[origin+j]); } } ); } typedef unsigned int IndexFunction(unsigned int, unsigned int m); class ImplicitHConvolution2 : public ThreadBase { protected: unsigned int mx,my; bool xcompact,ycompact; Complex *u1; Complex *u2; unsigned int A,B; fft0pad *xfftpad; ImplicitHConvolution **yconvolve; Complex **U2; bool allocated; unsigned int indexsize; bool toplevel; public: unsigned int *index; void initpointers2(Complex **&U2, Complex *u2, unsigned int stride) { unsigned int C=max(A,B); U2=new Complex *[C]; for(unsigned int a=0; a < C; ++a) U2[a]=u2+a*stride; if(toplevel) allocateindex(1,new unsigned int[1]); } void deletepointers2(Complex **&U2) { if(toplevel) { delete [] index; for(unsigned int t=1; t < threads; ++t) delete [] yconvolve[t]->index; } delete [] U2; } void allocateindex(unsigned int n, unsigned int *i) { indexsize=n; index=i; yconvolve[0]->allocateindex(n,i); for(unsigned int t=1; t < threads; ++t) yconvolve[t]->allocateindex(n,new unsigned int[n]); } void init(const convolveOptions& options) { unsigned int C=max(A,B); toplevel=options.toplevel; xfftpad=xcompact ? 
new fft0pad(mx,options.ny,options.ny,u2) : new fft1pad(mx,options.ny,options.ny,u2); yconvolve=new ImplicitHConvolution*[threads]; for(unsigned int t=0; t < threads; ++t) yconvolve[t]=new ImplicitHConvolution(my,ycompact,u1+t*(my/2+1)*C,A,B, innerthreads); initpointers2(U2,u2,options.stride2); } void set(convolveOptions& options) { if(options.nx == 0) options.nx=mx; if(options.ny == 0) { options.ny=my+!ycompact; options.stride2=(mx+xcompact)*options.ny; } } // u1 is a temporary array of size (my/2+1)*C*threads. // u2 is a temporary array of size (mx+xcompact)*(my+!ycompact)*C; // A is the number of inputs. // B is the number of outputs. // Here C=max(A,B). ImplicitHConvolution2(unsigned int mx, unsigned int my, Complex *u1, Complex *u2, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads, convolveOptions options=defaultconvolveOptions) : ThreadBase(threads), mx(mx), my(my), xcompact(true), ycompact(true), u1(u1), u2(u2), A(A), B(B), allocated(false) { set(options); multithread(options.nx); init(options); } ImplicitHConvolution2(unsigned int mx, unsigned int my, bool xcompact, bool ycompact, Complex *u1, Complex *u2, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads, convolveOptions options=defaultconvolveOptions) : ThreadBase(threads), mx(mx), my(my), xcompact(xcompact), ycompact(ycompact), u1(u1), u2(u2), A(A), B(B), allocated(false) { set(options); multithread(options.nx); init(options); } ImplicitHConvolution2(unsigned int mx, unsigned int my, bool xcompact=true, bool ycompact=true, unsigned int A=2, unsigned int B=1, unsigned int threads=fftw::maxthreads, convolveOptions options=defaultconvolveOptions) : ThreadBase(threads), mx(mx), my(my), xcompact(xcompact), ycompact(ycompact), A(A), B(B), allocated(true) { set(options); multithread(options.nx); unsigned int C=max(A,B); u1=utils::ComplexAlign((my/2+1)*C*threads); u2=utils::ComplexAlign(options.stride2*C); init(options); } virtual ~ImplicitHConvolution2() { 
deletepointers2(U2); for(unsigned int t=0; t < threads; ++t) delete yconvolve[t]; delete [] yconvolve; delete xfftpad; if(allocated) { utils::deleteAlign(u2); utils::deleteAlign(u1); } } void backwards(Complex **F, Complex **U2, unsigned int ny, bool symmetrize, unsigned int offset) { for(unsigned int a=0; a < A; ++a) { Complex *f=F[a]+offset; if(symmetrize) HermitianSymmetrizeX(mx,ny,mx-xcompact,f); xfftpad->backwards(f,U2[a]); } } void subconvolution(Complex **F, realmultiplier *pmult, IndexFunction indexfunction, unsigned int M, unsigned int stride, unsigned int offset=0) { if(threads > 1) { #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < M; ++i) yconvolve[get_thread_num()]->convolve(F,pmult,indexfunction(i,mx), offset+i*stride); } else { ImplicitHConvolution *yconvolve0=yconvolve[0]; for(unsigned int i=0; i < M; ++i) yconvolve0->convolve(F,pmult,indexfunction(i,mx),offset+i*stride); } } void forwards(Complex **F, Complex **U2, unsigned int offset) { for(unsigned int b=0; b < B; ++b) xfftpad->forwards(F[b]+offset,U2[b]); } // F is a pointer to A distinct data blocks each of size // (2mx-compact)*(my+!ycompact), shifted by offset (contents not preserved). virtual void convolve(Complex **F, realmultiplier *pmult, bool symmetrize=true, unsigned int i=0, unsigned int offset=0) { if(!toplevel) { index[indexsize-2]=i; if(threads > 1) { for(unsigned int t=1; t < threads; ++t) { unsigned int *Index=yconvolve[t]->index; for(unsigned int i=0; i < indexsize; ++i) Index[i]=index[i]; } } } unsigned stride=my+!ycompact; backwards(F,U2,stride,symmetrize,offset); subconvolution(F,pmult,xfftpad->findex,2*mx-xcompact,stride,offset); subconvolution(U2,pmult,xfftpad->uindex,mx+xcompact,stride); forwards(F,U2,offset); } // Binary convolution: void convolve(Complex *f, Complex *g, bool symmetrize=true) { Complex *F[]={f,g}; convolve(F,multbinary,symmetrize); } }; // In-place implicitly dealiased 3D complex convolution. 
class ImplicitConvolution3 : public ThreadBase {
protected:
  unsigned int mx,my,mz;             // convolution size in each dimension
  Complex *u1;                       // work array of size mz*C*threads
  Complex *u2;                       // work array of size my*mz*C*threads
  Complex *u3;                       // work array of size mx*my*mz*C
  unsigned int A,B;                  // number of inputs, number of outputs
  fftpad *xfftpad;                   // implicitly padded FFT in the x direction
  ImplicitConvolution2 **yzconvolve; // one 2D yz-plane subconvolution per thread
  Complex **U3;                      // per-component pointers into u3
  bool allocated;                    // true if u1,u2,u3 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // Build the C=max(A,B) per-component pointers into the work array u3,
  // spaced stride apart; at top level also allocate the 2-entry index.
  void initpointers3(Complex **&U3, Complex *u3, unsigned int stride) {
    unsigned int C=max(A,B);
    U3=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U3[a]=u3+a*stride;
    if(toplevel) allocateindex(2,new unsigned int[2]);
  }

  // Release the pointer table built by initpointers3 (and, at top level,
  // the index arrays handed out to the per-thread subconvolutions).
  void deletepointers3(Complex **&U3) {
    if(toplevel) {
      delete [] index;
      for(unsigned int t=1; t < threads; ++t)
        delete [] yzconvolve[t]->index;
    }
    delete [] U3;
  }

  // Thread 0 shares index array i; threads 1..threads-1 get fresh copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yzconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yzconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  // Construct the x-direction padded FFT and, unless nz was overridden,
  // the per-thread 2D subconvolutions operating on yz planes.
  void init(const convolveOptions& options) {
    toplevel=options.toplevel;
    unsigned int nyz=options.ny*options.nz;
    xfftpad=new fftpad(mx,nyz,nyz,u3,threads);

    if(options.nz == mz) {
      unsigned int C=max(A,B);
      yzconvolve=new ImplicitConvolution2*[threads];
      for(unsigned int t=0; t < threads; ++t)
        yzconvolve[t]=new ImplicitConvolution2(my,mz,u1+t*mz*C*innerthreads,
                                               u2+t*options.stride2*C,A,B,
                                               innerthreads,false);
      initpointers3(U3,u3,options.stride3);
    } else yzconvolve=NULL;
  }

  // Fill in default dimensions/strides when the caller left them zero.
  void set(convolveOptions &options) {
    if(options.ny == 0) {
      options.ny=my;
      options.nz=mz;
      options.stride2=my*mz;
      options.stride3=mx*my*mz;
    }
  }

  // u1 is a temporary array of size mz*C*threads.
  // u2 is a temporary array of size my*mz*C*threads.
  // u3 is a temporary array of size mx*my*mz*C.
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                       Complex *u1, Complex *u2, Complex *u3,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions) :
    ThreadBase(threads), mx(mx), my(my), mz(mz), u1(u1), u2(u2), u3(u3),
    A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  // Self-allocating variant: work arrays are created here and freed in the
  // destructor (allocated=true).
  ImplicitConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                       unsigned int A=2, unsigned int B=1,
                       unsigned int threads=fftw::maxthreads,
                       convolveOptions options=defaultconvolveOptions) :
    ThreadBase(threads), mx(mx), my(my), mz(mz), A(A), B(B), allocated(true) {
    set(options);
    multithread(mx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign(mz*C*threads*innerthreads);
    u2=utils::ComplexAlign(options.stride2*C*threads);
    u3=utils::ComplexAlign(options.stride3*C);
    init(options);
  }

  virtual ~ImplicitConvolution3() {
    if(yzconvolve) {
      deletepointers3(U3);
      for(unsigned int t=0; t < threads; ++t)
        delete yzconvolve[t];
      delete [] yzconvolve;
    }
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u3);
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  // Backward x-transforms of all A inputs into the padding buffers U3.
  void backwards(Complex **F, Complex **U3, unsigned int offset) {
    for(unsigned int a=0; a < A; ++a)
      xfftpad->backwards(F[a]+offset,U3[a]);
  }

  // Run the per-plane 2D subconvolutions; r selects the even/odd residue
  // passed down as the outer index.
  void subconvolution(Complex **F, multiplier *pmult,
                      unsigned int r, unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yzconvolve[get_thread_num()]->convolve(F,pmult,2*i+r,offset+i*stride);
    } else {
      ImplicitConvolution2 *yzconvolve0=yzconvolve[0];
      for(unsigned int i=0; i < M; ++i) {
        yzconvolve0->convolve(F,pmult,2*i+r,offset+i*stride);
      }
    }
  }

  // Forward x-transforms of the B outputs back out of U3.
  void forwards(Complex **F, Complex **U3, unsigned int offset=0) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U3[b]);
  }

  // F is a pointer to A distinct data blocks each of size mx*my*mz,
  // shifted by offset
  virtual void convolve(Complex **F, multiplier *pmult, unsigned int i=0,
                        unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-3]=i;
      if(threads > 1) {  // propagate the outer index to every thread's copy
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yzconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    unsigned int stride=my*mz;
    backwards(F,U3,offset);
    subconvolution(F,pmult,0,mx,stride,offset);
    subconvolution(U3,pmult,1,mx,stride);
    forwards(F,U3,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g) {
    Complex *F[]={f,g};
    convolve(F,multbinary);
  }

  // Binary correlation:
  void correlate(Complex *f, Complex *g) {
    Complex *F[]={f, g};
    convolve(F,multcorrelation);
  }

  void autoconvolve(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautoconvolution);
  }

  void autocorrelate(Complex *f) {
    Complex *F[]={f};
    convolve(F,multautocorrelation);
  }
};

// In-place implicitly dealiased 3D Hermitian convolution.
class ImplicitHConvolution3 : public ThreadBase {
protected:
  unsigned int mx,my,mz;              // convolution size in each dimension
  bool xcompact,ycompact,zcompact;    // compact data layout per dimension
  Complex *u1;                        // work array: (mz/2+1)*C*threads
  Complex *u2;                        // work array: see constructor comments
  Complex *u3;                        // work array: see constructor comments
  unsigned int A,B;                   // number of inputs, number of outputs
  fft0pad *xfftpad;                   // padded x-direction FFT
  ImplicitHConvolution2 **yzconvolve; // one 2D yz-plane subconvolution per thread
  Complex **U3;                       // per-component pointers into u3
  bool allocated;                     // true if u1,u2,u3 were allocated here
  unsigned int indexsize;
  bool toplevel;
public:
  unsigned int *index;

  // Build the C=max(A,B) per-component pointers into the work array u3.
  void initpointers3(Complex **&U3, Complex *u3, unsigned int stride) {
    unsigned int C=max(A,B);
    U3=new Complex *[C];
    for(unsigned int a=0; a < C; ++a)
      U3[a]=u3+a*stride;
    if(toplevel) allocateindex(2,new unsigned int[2]);
  }

  void deletepointers3(Complex **&U3) {
    if(toplevel) {
      delete [] index;
      for(unsigned int t=1; t < threads; ++t)
        delete [] yzconvolve[t]->index;
    }
    delete [] U3;
  }

  // Thread 0 shares index array i; threads 1..threads-1 get fresh copies.
  void allocateindex(unsigned int n, unsigned int *i) {
    indexsize=n;
    index=i;
    yzconvolve[0]->allocateindex(n,i);
    for(unsigned int t=1; t < threads; ++t)
      yzconvolve[t]->allocateindex(n,new unsigned int[n]);
  }

  // Construct the padded x-direction FFT (layout depends on xcompact) and,
  // unless nz was overridden, the per-thread 2D subconvolutions.
  void init(const convolveOptions& options) {
    toplevel=options.toplevel;
    unsigned int nyz=options.ny*options.nz;
    xfftpad=xcompact ?
      new fft0pad(mx,nyz,nyz,u3) : new fft1pad(mx,nyz,nyz,u3);

    if(options.nz == mz+!zcompact) {
      unsigned int C=max(A,B);
      yzconvolve=new ImplicitHConvolution2*[threads];
      for(unsigned int t=0; t < threads; ++t)
        yzconvolve[t]=new ImplicitHConvolution2(my,mz,
                                                ycompact,zcompact,
                                                u1+t*(mz/2+1)*C*innerthreads,
                                                u2+t*options.stride2*C,
                                                A,B,innerthreads,false);
      initpointers3(U3,u3,options.stride3);
    } else yzconvolve=NULL;
  }

  // Fill in default (Hermitian) dimensions/strides when left zero.
  void set(convolveOptions& options) {
    if(options.ny == 0) {
      options.ny=2*my-ycompact;
      options.nz=mz+!zcompact;
      options.stride2=(my+ycompact)*options.nz;
      options.stride3=(mx+xcompact)*options.ny*options.nz;
    }
  }

  // u1 is a temporary array of size (mz/2+1)*C*threads.
  // u2 is a temporary array of size (my+ycompact)*(mz+!zcompact)*C*threads.
  // u3 is a temporary array of size
  // (mx+xcompact)*(2my-ycompact)*(mz+!zcompact)*C.
  // A is the number of inputs.
  // B is the number of outputs.
  // Here C=max(A,B).
  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        Complex *u1, Complex *u2, Complex *u3,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions) :
    ThreadBase(threads), mx(mx), my(my), mz(mz),
    xcompact(true), ycompact(true), zcompact(true), u1(u1), u2(u2), u3(u3),
    A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        bool xcompact, bool ycompact, bool zcompact,
                        Complex *u1, Complex *u2, Complex *u3,
                        unsigned int A=2, unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions) :
    ThreadBase(threads), mx(mx), my(my), mz(mz),
    xcompact(xcompact), ycompact(ycompact), zcompact(zcompact),
    u1(u1), u2(u2), u3(u3), A(A), B(B), allocated(false) {
    set(options);
    multithread(mx);
    init(options);
  }

  // Self-allocating variant: work arrays are created here and freed in the
  // destructor (allocated=true).
  ImplicitHConvolution3(unsigned int mx, unsigned int my, unsigned int mz,
                        bool xcompact=true, bool ycompact=true,
                        bool zcompact=true, unsigned int A=2,
                        unsigned int B=1,
                        unsigned int threads=fftw::maxthreads,
                        convolveOptions options=defaultconvolveOptions) :
    ThreadBase(threads), mx(mx), my(my), mz(mz),
    xcompact(xcompact), ycompact(ycompact), zcompact(zcompact),
    A(A), B(B), allocated(true) {
    set(options);
    multithread(mx);
    unsigned int C=max(A,B);
    u1=utils::ComplexAlign((mz/2+1)*C*threads*innerthreads);
    u2=utils::ComplexAlign(options.stride2*C*threads);
    u3=utils::ComplexAlign(options.stride3*C);
    init(options);
  }

  virtual ~ImplicitHConvolution3() {
    if(yzconvolve) {
      deletepointers3(U3);
      for(unsigned int t=0; t < threads; ++t)
        delete yzconvolve[t];
      delete [] yzconvolve;
    }
    delete xfftpad;
    if(allocated) {
      utils::deleteAlign(u3);
      utils::deleteAlign(u2);
      utils::deleteAlign(u1);
    }
  }

  // Enforce the Hermitian symmetry of input f in the xy origin plane.
  virtual void HermitianSymmetrize(Complex *f, Complex *u) {
    HermitianSymmetrizeXY(mx,my,mz+!zcompact,mx-xcompact,my-ycompact,f,
                          threads);
  }

  // Backward x-transforms of all A inputs (optionally symmetrized first).
  void backwards(Complex **F, Complex **U3, bool symmetrize,
                 unsigned int offset) {
    for(unsigned int a=0; a < A; ++a) {
      Complex *f=F[a]+offset;
      Complex *u=U3[a];
      if(symmetrize)
        HermitianSymmetrize(f,u);
      xfftpad->backwards(f,u);
    }
  }

  // Run the per-plane 2D subconvolutions; indexfunction maps the plane
  // number to the outer index reported to the multiplier.
  void subconvolution(Complex **F, realmultiplier *pmult,
                      IndexFunction indexfunction,
                      unsigned int M, unsigned int stride,
                      unsigned int offset=0) {
    if(threads > 1) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < M; ++i)
        yzconvolve[get_thread_num()]->convolve(F,pmult,false,
                                               indexfunction(i,mx),
                                               offset+i*stride);
    } else {
      ImplicitHConvolution2 *yzconvolve0=yzconvolve[0];
      for(unsigned int i=0; i < M; ++i)
        yzconvolve0->convolve(F,pmult,false,indexfunction(i,mx),
                              offset+i*stride);
    }
  }

  // Forward x-transforms of the B outputs back out of U3.
  void forwards(Complex **F, Complex **U3, unsigned int offset=0) {
    for(unsigned int b=0; b < B; ++b)
      xfftpad->forwards(F[b]+offset,U3[b]);
  }

  // F is a pointer to A distinct data blocks each of size
  // (2mx-compact)*(2my-ycompact)*(mz+!zcompact), shifted by offset
  // (contents not preserved).
  virtual void convolve(Complex **F, realmultiplier *pmult,
                        bool symmetrize=true, unsigned int i=0,
                        unsigned int offset=0) {
    if(!toplevel) {
      index[indexsize-3]=i;
      if(threads > 1) {  // propagate the outer index to every thread's copy
        for(unsigned int t=1; t < threads; ++t) {
          unsigned int *Index=yzconvolve[t]->index;
          for(unsigned int i=0; i < indexsize; ++i)
            Index[i]=index[i];
        }
      }
    }
    unsigned int stride=(2*my-ycompact)*(mz+!zcompact);
    backwards(F,U3,symmetrize,offset);
    subconvolution(F,pmult,xfftpad->findex,2*mx-xcompact,stride,offset);
    subconvolution(U3,pmult,xfftpad->uindex,mx+xcompact,stride);
    forwards(F,U3,offset);
  }

  // Binary convolution:
  void convolve(Complex *f, Complex *g, bool symmetrize=true) {
    Complex *F[]={f,g};
    convolve(F,multbinary,symmetrize);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
class ImplicitHTConvolution : public ThreadBase {
protected:
  unsigned int m;          // problem size
  Complex *u,*v,*w;        // work arrays, each of size (m+1)*M
  unsigned int M;          // number of data blocks (dot-product terms)
  unsigned int s;
  rcfft1d *rc, *rco;       // real-to-complex transforms
  crfft1d *cr, *cro;       // complex-to-real transforms
  Complex *ZetaH, *ZetaL;  // precomputed roots of unity
  Complex **W;             // per-block pointers into w
  bool allocated;          // true if u,v,w were allocated here
  unsigned int twom;
  unsigned int stride;
public:
  // Build the M per-block pointers into the work array w.
  void initpointers(Complex **&W, Complex *w) {
    W=new Complex *[M];
    unsigned int m1=m+1;
    for(unsigned int s=0; s < M; ++s)
      W[s]=w+s*m1;
  }

  void deletepointers(Complex **&W) {
    delete [] W;
  }

  void init() {
    twom=2*m;
    stride=twom+2;

    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);

    rco=new rcfft1d(twom,(double *) u,v);
    cro=new crfft1d(twom,v,(double *) u);

    // Limit threads to what the planned transforms can actually use.
    threads=std::min(threads,std::max(rco->Threads(),cro->Threads()));

    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
    initpointers(W,w);
  }

  // u, v, and w are distinct temporary arrays each of size (m+1)*M.
  ImplicitHTConvolution(unsigned int m, Complex *u, Complex *v,
                        Complex *w, unsigned int M=1) :
    m(m), u(u), v(v), w(w), M(M), allocated(false) {
    init();
  }

  // Self-allocating variant: work arrays are created here and freed in the
  // destructor (allocated=true).
  ImplicitHTConvolution(unsigned int m, unsigned int M=1) :
    m(m), u(utils::ComplexAlign(m*M+M)),
    v(utils::ComplexAlign(m*M+M)),
    w(utils::ComplexAlign(m*M+M)),
    M(M), allocated(true) {
    init();
  }

  ~ImplicitHTConvolution() {
    deletepointers(W);

    if(allocated) {
      utils::deleteAlign(w);
      utils::deleteAlign(v);
      utils::deleteAlign(u);
    }

    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cro;
    delete rco;
    delete cr;
    delete rc;
  }

  void mult(double *a, double *b, double **C, unsigned int offset=0);

  void convolve(Complex **F, Complex **G, Complex **H,
                Complex *u, Complex *v, Complex **W, unsigned int offset=0);

  // F, G, and H are distinct pointers to M distinct data blocks each of size
  // m+1, shifted by offset (contents not preserved).
  // The output is returned in F[0].
  void convolve(Complex **F, Complex **G, Complex **H,
                unsigned int offset=0) {
    convolve(F,G,H,u,v,W,offset);
  }

  // Special case M=1:
  void convolve(Complex *f, Complex *g, Complex *h) {
    convolve(&f,&g,&h);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
// Special case G=H, M=1.
class ImplicitHFGGConvolution : public ThreadBase {
protected:
  unsigned int m;          // problem size
  Complex *u,*v;           // work arrays, each of size m+1
  unsigned int s;
  rcfft1d *rc, *rco;       // real-to-complex transforms
  crfft1d *cr, *cro;       // complex-to-real transforms
  Complex *ZetaH, *ZetaL;  // precomputed roots of unity
  bool allocated;          // true if u,v were allocated here
  unsigned int twom;
  unsigned int stride;
public:
  void init() {
    twom=2*m;
    stride=twom+2;

    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);

    rco=new rcfft1d(twom,(double *) u,v);
    cro=new crfft1d(twom,v,(double *) u);

    // Limit threads to what the planned transforms can actually use.
    threads=std::min(threads,std::max(rco->Threads(),cro->Threads()));

    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
  }

  // u and v are distinct temporary arrays each of size m+1.
  ImplicitHFGGConvolution(unsigned int m, Complex *u, Complex *v) :
    m(m), u(u), v(v), allocated(false) {
    init();
  }

  // Self-allocating variant (allocated=true).
  ImplicitHFGGConvolution(unsigned int m) :
    m(m), u(utils::ComplexAlign(m+1)), v(utils::ComplexAlign(m+1)),
    allocated(true) {
    init();
  }

  ~ImplicitHFGGConvolution() {
    if(allocated) {
      utils::deleteAlign(v);
      utils::deleteAlign(u);
    }

    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cro;
    delete rco;
    delete cr;
    delete rc;
  }

  void mult(double *a, double *b);

  void convolve(Complex *f, Complex *g, Complex *u, Complex *v);

  // f and g are distinct pointers to data of size m+1 (contents not
  // preserved). The output is returned in f.
  void convolve(Complex *f, Complex *g) {
    convolve(f,g,u,v);
  }
};

// In-place implicitly dealiased Hermitian ternary convolution.
// Special case F=G=H, M=1.
class ImplicitHFFFConvolution : public ThreadBase {
protected:
  unsigned int m;          // problem size
  Complex *u;              // work array of size m+1
  unsigned int s;
  rcfft1d *rc;             // real-to-complex transform
  crfft1d *cr;             // complex-to-real transform
  Complex *ZetaH, *ZetaL;  // precomputed roots of unity
  bool allocated;          // true if u was allocated here
  unsigned int twom;
  unsigned int stride;
public:
  void mult(double *a);

  void init() {
    twom=2*m;
    stride=twom+2;

    rc=new rcfft1d(twom,u);
    cr=new crfft1d(twom,u);

    // Limit threads to what the planned transforms can actually use.
    threads=std::min(threads,std::max(rc->Threads(),cr->Threads()));

    s=BuildZeta(4*m,m,ZetaH,ZetaL,threads);
  }

  // u is a distinct temporary array of size m+1.
  ImplicitHFFFConvolution(unsigned int m, Complex *u) :
    m(m), u(u), allocated(false) {
    init();
  }

  // Self-allocating variant (allocated=true).
  ImplicitHFFFConvolution(unsigned int m) :
    m(m), u(utils::ComplexAlign(m+1)), allocated(true) {
    init();
  }

  ~ImplicitHFFFConvolution() {
    if(allocated)
      utils::deleteAlign(u);

    utils::deleteAlign(ZetaL);
    utils::deleteAlign(ZetaH);
    delete cr;
    delete rc;
  }

  void convolve(Complex *f, Complex *u);

  // f is a pointer to data of size m+1 (contents not preserved).
  // The output is returned in f.
  void convolve(Complex *f) {
    convolve(f,u);
  }
};

// Compute the scrambled implicitly 2m-padded complex Fourier transform of M
// complex vectors, each of length 2m with the Fourier origin at index m.
// The arrays in and out (which may coincide), along // with the array u, must be allocated as Complex[M*2m]. // // fft0bipad fft(m,M,stride); // fft.backwards(in,u); // fft.forwards(in,u); // // Notes: // stride is the spacing between the elements of each Complex vector. // class fft0bipad { unsigned int m; unsigned int M; unsigned int stride; unsigned int s; mfft1d *Backwards; mfft1d *Forwards; Complex *ZetaH, *ZetaL; unsigned int threads; public: fft0bipad(unsigned int m, unsigned int M, unsigned int stride, Complex *f, unsigned int Threads=fftw::maxthreads) : m(m), M(M), stride(stride), threads(Threads) { unsigned int twom=2*m; Backwards=new mfft1d(twom,1,M,stride,1,f,NULL,threads); Forwards=new mfft1d(twom,-1,M,stride,1,f,NULL,threads); threads=std::min(threads, std::max(Backwards->Threads(),Forwards->Threads())); s=BuildZeta(4*m,twom,ZetaH,ZetaL,threads); } ~fft0bipad() { utils::deleteAlign(ZetaL); utils::deleteAlign(ZetaH); delete Forwards; delete Backwards; } void backwards(Complex *f, Complex *u); void forwards(Complex *f, Complex *u); }; // In-place implicitly dealiased 2D Hermitian ternary convolution. 
class ImplicitHTConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;                // convolution size in each dimension
  Complex *u1,*v1,*w1;               // inner work arrays: (my+1)*M*threads each
  Complex *u2,*v2,*w2;               // outer work arrays: 2mx*(my+1)*M each
  unsigned int M;                    // number of data blocks
  fft0bipad *xfftpad;                // bi-padded x-direction FFT
  ImplicitHTConvolution *yconvolve;  // shared 1D y-direction subconvolution
  Complex **U2,**V2,**W2;            // per-block pointers into u2,v2,w2
  bool allocated;                    // true if work arrays were allocated here
  Complex **u,**v;                   // per-thread pointers into u1,v1
  Complex ***W;                      // per-thread pointer tables into w1
public:
  // Build per-thread pointers into the inner work arrays u1, v1, w1.
  void initpointers(Complex **&u, Complex **&v, Complex ***&W,
                    unsigned int threads) {
    u=new Complex *[threads];
    v=new Complex *[threads];
    W=new Complex **[threads];
    unsigned int my1M=(my+1)*M;
    for(unsigned int i=0; i < threads; ++i) {
      unsigned int imy1M=i*my1M;
      u[i]=u1+imy1M;
      v[i]=v1+imy1M;
      Complex *wi=w1+imy1M;
      yconvolve->initpointers(W[i],wi);
    }
  }

  void deletepointers(Complex **&u, Complex **&v, Complex ***&W,
                      unsigned int threads) {
    for(unsigned int i=0; i < threads; ++i)
      yconvolve->deletepointers(W[i]);
    delete [] W;
    delete [] v;
    delete [] u;
  }

  // Build per-block pointers into the outer work arrays u2, v2, w2.
  void initpointers(Complex **&U2, Complex **&V2, Complex **&W2,
                    Complex *u2, Complex *v2, Complex *w2) {
    U2=new Complex *[M];
    V2=new Complex *[M];
    W2=new Complex *[M];
    unsigned int mu=2*mx*(my+1);
    for(unsigned int s=0; s < M; ++s) {
      unsigned int smu=s*mu;
      U2[s]=u2+smu;
      V2[s]=v2+smu;
      W2[s]=w2+smu;
    }
  }

  void deletepointers(Complex **&U2, Complex **&V2, Complex **&W2) {
    delete [] W2;
    delete [] V2;
    delete [] U2;
  }

  void init() {
    xfftpad=new fft0bipad(mx,my,my+1,u2,threads);
    yconvolve=new ImplicitHTConvolution(my,u1,v1,w1,M);
    yconvolve->Threads(1);  // inner convolution is serial; outer loop threads
    initpointers(u,v,W,threads);
    initpointers(U2,V2,W2,u2,v2,w2);
  }

  // u1, v1, and w1 are temporary arrays of size (my+1)*M*threads;
  // u2, v2, and w2 are temporary arrays of size 2mx*(my+1)*M.
  // M is the number of data blocks (each corresponding to a dot product term).
  // threads is the number of threads to use in the outer subconvolution loop.
  ImplicitHTConvolution2(unsigned int mx, unsigned int my,
                         Complex *u1, Complex *v1, Complex *w1,
                         Complex *u2, Complex *v2, Complex *w2,
                         unsigned int M=1,
                         unsigned int threads=fftw::maxthreads) :
    ThreadBase(threads), mx(mx), my(my), u1(u1), v1(v1), w1(w1),
    u2(u2), v2(v2), w2(w2), M(M), allocated(false) {
    init();
  }

  // Self-allocating variant: work arrays are created here and freed in the
  // destructor (allocated=true).
  ImplicitHTConvolution2(unsigned int mx, unsigned int my,
                         unsigned int M=1,
                         unsigned int threads=fftw::maxthreads) :
    ThreadBase(threads), mx(mx), my(my),
    u1(utils::ComplexAlign((my+1)*M*threads)),
    v1(utils::ComplexAlign((my+1)*M*threads)),
    w1(utils::ComplexAlign((my+1)*M*threads)),
    u2(utils::ComplexAlign(2*mx*(my+1)*M)),
    v2(utils::ComplexAlign(2*mx*(my+1)*M)),
    w2(utils::ComplexAlign(2*mx*(my+1)*M)),
    M(M), allocated(true) {
    init();
  }

  ~ImplicitHTConvolution2() {
    deletepointers(U2,V2,W2);
    deletepointers(u,v,W,threads);

    delete yconvolve;
    delete xfftpad;

    if(allocated) {
      utils::deleteAlign(w2);
      utils::deleteAlign(v2);
      utils::deleteAlign(u2);
      utils::deleteAlign(w1);
      utils::deleteAlign(v1);
      utils::deleteAlign(u1);
    }
  }

  void convolve(Complex **F, Complex **G, Complex **H,
                Complex **u, Complex **v, Complex ***W,
                Complex **U2, Complex **V2, Complex **W2,
                bool symmetrize=true, unsigned int offset=0) {
    Complex *u2=U2[0];
    Complex *v2=V2[0];
    Complex *w2=W2[0];
    unsigned int my1=my+1;
    unsigned int mu=2*mx*my1;

    // Backward x-transforms of each block of F, G, and H (with optional
    // Hermitian symmetrization) into the outer work arrays.
    for(unsigned int s=0; s < M; ++s) {
      Complex *f=F[s]+offset;
      if(symmetrize)
        HermitianSymmetrizeX(mx,my1,mx,f);
      xfftpad->backwards(f,u2+s*mu);
    }

    for(unsigned int s=0; s < M; ++s) {
      Complex *g=G[s]+offset;
      if(symmetrize)
        HermitianSymmetrizeX(mx,my1,mx,g);
      xfftpad->backwards(g,v2+s*mu);
    }

    for(unsigned int s=0; s < M; ++s) {
      Complex *h=H[s]+offset;
      if(symmetrize)
        HermitianSymmetrizeX(mx,my1,mx,h);
      xfftpad->backwards(h,w2+s*mu);
    }

    // Row-by-row 1D y-subconvolutions, parallelized over rows.
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < mu; i += my1) {
      unsigned int thread=get_thread_num();
      yconvolve->convolve(F,G,H,u[thread],v[thread],W[thread],i+offset);
    }

#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < mu; i += my1) {
      unsigned int thread=get_thread_num();
      yconvolve->convolve(U2,V2,W2,u[thread],v[thread],W[thread],i+offset);
    }

    xfftpad->forwards(F[0]+offset,u2);
  }

  // F, G, and H are distinct pointers to M distinct data blocks each of size
  // 2mx*(my+1), shifted by offset (contents not preserved).
  // The output is returned in F[0].
  void convolve(Complex **F, Complex **G, Complex **H, bool symmetrize=true,
                unsigned int offset=0) {
    convolve(F,G,H,u,v,W,U2,V2,W2,symmetrize,offset);
  }

  // Special case M=1:
  void convolve(Complex *f, Complex *g, Complex *h, bool symmetrize=true) {
    convolve(&f,&g,&h,symmetrize);
  }
};

// In-place implicitly dealiased 2D Hermitian ternary convolution.
// Special case G=H, M=1.
class ImplicitHFGGConvolution2 : public ThreadBase {
protected:
  unsigned int mx,my;                 // convolution size in each dimension
  Complex *u1,*v1;                    // inner work arrays: (my+1)*threads each
  Complex *u2,*v2;                    // outer work arrays: 2mx*(my+1) each
  fft0bipad *xfftpad;                 // bi-padded x-direction FFT
  ImplicitHFGGConvolution *yconvolve; // shared 1D y-direction subconvolution
  bool allocated;                     // true if work arrays were allocated here
  Complex **u,**v;                    // per-thread pointers into u1,v1
public:
  // Build per-thread pointers into the inner work arrays u1 and v1.
  void initpointers(Complex **&u, Complex **&v, unsigned int threads) {
    u=new Complex *[threads];
    v=new Complex *[threads];
    unsigned int my1=my+1;
    for(unsigned int i=0; i < threads; ++i) {
      unsigned int imy1=i*my1;
      u[i]=u1+imy1;
      v[i]=v1+imy1;
    }
  }

  void deletepointers(Complex **&u, Complex **&v) {
    delete [] v;
    delete [] u;
  }

  void init() {
    xfftpad=new fft0bipad(mx,my,my+1,u2,threads);
    yconvolve=new ImplicitHFGGConvolution(my,u1,v1);
    yconvolve->Threads(1);  // inner convolution is serial; outer loop threads
    initpointers(u,v,threads);
  }

  // u1 and v1 are temporary arrays of size (my+1)*threads.
  // u2 and v2 are temporary arrays of size 2mx*(my+1).
  // threads is the number of threads to use in the outer subconvolution loop.
ImplicitHFGGConvolution2(unsigned int mx, unsigned int my, Complex *u1, Complex *v1, Complex *u2, Complex *v2, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), mx(mx), my(my), u1(u1), v1(v1), u2(u2), v2(v2), allocated(false) { init(); } ImplicitHFGGConvolution2(unsigned int mx, unsigned int my, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), mx(mx), my(my), u1(utils::ComplexAlign((my+1)*threads)), v1(utils::ComplexAlign((my+1)*threads)), u2(utils::ComplexAlign(2*mx*(my+1))), v2(utils::ComplexAlign(2*mx*(my+1))), allocated(true) { init(); } ~ImplicitHFGGConvolution2() { deletepointers(u,v); delete yconvolve; delete xfftpad; if(allocated) { utils::deleteAlign(v2); utils::deleteAlign(u2); utils::deleteAlign(v1); utils::deleteAlign(u1); } } void convolve(Complex *f, Complex *g, Complex **u, Complex **v, Complex *u2, Complex *v2, bool symmetrize=true) { unsigned int my1=my+1; unsigned int mu=2*mx*my1; if(symmetrize) HermitianSymmetrizeX(mx,my1,mx,f); xfftpad->backwards(f,u2); if(symmetrize) HermitianSymmetrizeX(mx,my1,mx,g); xfftpad->backwards(g,v2); #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < mu; i += my1) { unsigned int thread=get_thread_num(); yconvolve->convolve(f+i,g+i,u[thread],v[thread]); } #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < mu; i += my1) { unsigned int thread=get_thread_num(); yconvolve->convolve(u2+i,v2+i,u[thread],v[thread]); } xfftpad->forwards(f,u2); } void convolve(Complex *f, Complex *g, bool symmetrize=true) { convolve(f,g,u,v,u2,v2,symmetrize); } }; // In-place implicitly dealiased 2D Hermitian ternary convolution. // Special case F=G=H, M=1. 
class ImplicitHFFFConvolution2 : public ThreadBase { protected: unsigned int mx,my; Complex *u1; Complex *u2; fft0bipad *xfftpad; ImplicitHFFFConvolution *yconvolve; bool allocated; Complex **u; public: void initpointers(Complex **&u, unsigned int threads) { u=new Complex *[threads]; unsigned int my1=my+1; for(unsigned int i=0; i < threads; ++i) u[i]=u1+i*my1; } void deletepointers(Complex **&u) { delete [] u; } void init() { xfftpad=new fft0bipad(mx,my,my+1,u2,threads); yconvolve=new ImplicitHFFFConvolution(my,u1); yconvolve->Threads(1); initpointers(u,threads); } // u1 is a temporary array of size (my+1)*threads. // u2 is a temporary array of size 2mx*(my+1). // threads is the number of threads to use in the outer subconvolution loop. ImplicitHFFFConvolution2(unsigned int mx, unsigned int my, Complex *u1, Complex *u2, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), mx(mx), my(my), u1(u1), u2(u2), allocated(false) { init(); } ImplicitHFFFConvolution2(unsigned int mx, unsigned int my, unsigned int threads=fftw::maxthreads) : ThreadBase(threads), mx(mx), my(my), u1(utils::ComplexAlign((my+1)*threads)), u2(utils::ComplexAlign(2*mx*(my+1))), allocated(true) { init(); } ~ImplicitHFFFConvolution2() { deletepointers(u); delete yconvolve; delete xfftpad; if(allocated) { utils::deleteAlign(u2); utils::deleteAlign(u1); } } void convolve(Complex *f, Complex **u, Complex *u2, bool symmetrize=true) { unsigned int my1=my+1; unsigned int mu=2*mx*my1; if(symmetrize) HermitianSymmetrizeX(mx,my1,mx,f); xfftpad->backwards(f,u2); #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < mu; i += my1) yconvolve->convolve(f+i,u[get_thread_num()]); #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < mu; i += my1) yconvolve->convolve(u2+i,u[get_thread_num()]); xfftpad->forwards(f,u2); } void convolve(Complex *f, bool symmetrize=true) { convolve(f,u,u2,symmetrize); } }; 
} //end namespace fftwpp #endif
// ==== kmp_stats.h (LLVM OpenMP runtime statistics header follows) ====
#ifndef KMP_STATS_H #define KMP_STATS_H /** @file kmp_stats.h * Functions for collecting statistics. */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "kmp_config.h" #include "kmp_debug.h" #if KMP_STATS_ENABLED /* Statistics accumulator. Accumulates number of samples and computes min, max, mean, standard deviation on the fly. Online variance calculation algorithm from http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm */ #include "kmp_stats_timing.h" #include <limits> #include <math.h> #include <new> // placement new #include <stdint.h> #include <string> #include <vector> /* Enable developer statistics here if you want them. They are more detailed than is useful for application characterisation and are intended for the runtime library developer. */ #define KMP_DEVELOPER_STATS 0 /* Enable/Disable histogram output */ #define KMP_STATS_HIST 0 /*! * @ingroup STATS_GATHERING * \brief flags to describe the statistic (timer or counter) * */ enum stats_flags_e { noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic onlyInMaster = 1 << 1, //!< statistic is valid only for master noUnits = 1 << 2, //!< statistic doesn't need units printed next to it notInMaster = 1 << 3, //!< statistic is valid only for non-master threads logEvent = 1 << 4 //!< statistic can be logged on the event timeline when //! KMP_STATS_EVENTS is on (valid only for timers) }; /*! * @ingroup STATS_GATHERING * \brief the states which a thread can be in * */ enum stats_state_e { IDLE, SERIAL_REGION, FORK_JOIN_BARRIER, PLAIN_BARRIER, TASKWAIT, TASKYIELD, TASKGROUP, IMPLICIT_TASK, EXPLICIT_TASK, TEAMS_REGION }; /*! 
* \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(COUNTER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A counter counts the occurrence of some event. Each thread * accumulates its own count, at the end of execution the counts are aggregated * treating each thread as a separate measurement. (Unless onlyInMaster is set, * in which case there's only a single measurement). The min,mean,max are * therefore the values for the threads. Adding the counter here and then * putting a KMP_BLOCK_COUNTER(name) at the point you want to count is all you * need to do. All of the tables and printing is generated from this macro. * Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING */ // clang-format off #define KMP_FOREACH_COUNTER(macro, arg) \ macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \ macro(OMP_NESTED_PARALLEL, 0, arg) \ macro(OMP_LOOP_STATIC, 0, arg) \ macro(OMP_LOOP_STATIC_STEAL, 0, arg) \ macro(OMP_LOOP_DYNAMIC, 0, arg) \ macro(OMP_DISTRIBUTE, 0, arg) \ macro(OMP_BARRIER, 0, arg) \ macro(OMP_CRITICAL, 0, arg) \ macro(OMP_SINGLE, 0, arg) \ macro(OMP_MASTER, 0, arg) \ macro(OMP_TEAMS, 0, arg) \ macro(OMP_set_lock, 0, arg) \ macro(OMP_test_lock, 0, arg) \ macro(REDUCE_wait, 0, arg) \ macro(REDUCE_nowait, 0, arg) \ macro(OMP_TASKYIELD, 0, arg) \ macro(OMP_TASKLOOP, 0, arg) \ macro(TASK_executed, 0, arg) \ macro(TASK_cancelled, 0, arg) \ macro(TASK_stolen, 0, arg) // clang-format on /*! * \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A timer collects multiple samples of some count in each thread and * then finally aggregates all of the samples from all of the threads. 
For most * timers the printing code also provides an aggregation over the thread totals. * These are printed as TOTAL_foo. The count is normally a time (in ticks), * hence the name "timer". (But can be any value, so we use this for "number of * arguments passed to fork" as well). For timers the threads are not * significant, it's the individual observations that count, so the statistics * are at that level. Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING2 */ // clang-format off #define KMP_FOREACH_TIMER(macro, arg) \ macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \ macro (OMP_parallel, stats_flags_e::logEvent, arg) \ macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_teams, stats_flags_e::logEvent, arg) \ macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_loop_static, 0, arg) \ macro (OMP_loop_static_scheduling, 0, arg) \ macro (OMP_loop_dynamic, 0, arg) \ macro (OMP_loop_dynamic_scheduling, 0, arg) \ macro (OMP_distribute, 0, arg) \ macro (OMP_distribute_scheduling, 0, arg) \ macro (OMP_critical, 0, arg) \ macro (OMP_critical_wait, 0, arg) \ macro (OMP_single, 0, arg) \ macro (OMP_master, 0, arg) \ macro (OMP_task_immediate, 0, arg) \ macro (OMP_task_taskwait, 0, arg) \ macro (OMP_task_taskyield, 0, arg) \ macro (OMP_task_taskgroup, 0, arg) \ macro (OMP_task_join_bar, 0, arg) \ macro (OMP_task_plain_bar, 0, arg) \ macro (OMP_taskloop_scheduling, 0, arg) \ macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_idle, stats_flags_e::logEvent, arg) \ macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_serial, stats_flags_e::logEvent, arg) \ macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_loop_static_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro 
(OMP_loop_static_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_distribute_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ KMP_FOREACH_DEVELOPER_TIMER(macro, arg) // clang-format on // OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either // initializing OpenMP or being created by a master) // until the thread is destroyed // OMP_parallel -- Time thread spends executing work directly // within a #pragma omp parallel // OMP_parallel_overhead -- Time thread spends setting up a parallel region // OMP_loop_static -- Time thread spends executing loop iterations from // a statically scheduled loop // OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations // from a statically scheduled loop // OMP_loop_dynamic -- Time thread spends executing loop iterations from // a dynamically scheduled loop // OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations // from a dynamically scheduled loop // OMP_critical -- Time thread spends executing critical section // OMP_critical_wait -- Time thread spends waiting to enter // a critical section // OMP_single -- Time spent executing a "single" region // OMP_master -- Time spent executing a "master" region // OMP_task_immediate -- Time spent executing non-deferred tasks // OMP_task_taskwait -- Time spent executing tasks inside a taskwait // construct // OMP_task_taskyield -- Time spent executing tasks inside a taskyield // construct // OMP_task_taskgroup -- Time spent executing tasks inside a taskygroup // construct // OMP_task_join_bar -- Time spent executing tasks inside a join barrier // OMP_task_plain_bar -- Time spent executing tasks inside a barrier // construct // OMP_taskloop_scheduling -- Time spent scheduling 
tasks inside a taskloop // construct // OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or // inside implicit barrier at end of worksharing // construct // OMP_idle -- Time worker threads spend waiting for next // parallel region // OMP_fork_barrier -- Time spent in a the fork barrier surrounding a // parallel region // OMP_join_barrier -- Time spent in a the join barrier surrounding a // parallel region // OMP_serial -- Time thread zero spends executing serial code // OMP_set_numthreads -- Values passed to omp_set_num_threads // OMP_PARALLEL_args -- Number of arguments passed to a parallel region // OMP_loop_static_iterations -- Number of iterations thread is assigned for // statically scheduled loops // OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for // dynamically scheduled loops #if (KMP_DEVELOPER_STATS) // Timers which are of interest to runtime library developers, not end users. // These have to be explicitly enabled in addition to the other stats. 
// Developer-only timers (enabled only when KMP_DEVELOPER_STATS is set):
// KMP_fork_barrier      -- time in __kmp_fork_barrier
// KMP_join_barrier      -- time in __kmp_join_barrier
// KMP_barrier           -- time in __kmp_barrier
// KMP_end_split_barrier -- time in __kmp_end_split_barrier
// KMP_setup_icv_copy    -- time in __kmp_setup_icv_copy
// KMP_icv_copy          -- start/stop timer for any ICV copying
// KMP_linear_gather     -- time in __kmp_linear_barrier_gather
// KMP_linear_release    -- time in __kmp_linear_barrier_release
// KMP_tree_gather       -- time in __kmp_tree_barrier_gather
// KMP_tree_release      -- time in __kmp_tree_barrier_release
// KMP_hyper_gather      -- time in __kmp_hyper_barrier_gather
// KMP_hyper_release     -- time in __kmp_hyper_barrier_release
// clang-format off
// NOTE: the matching "#if (KMP_DEVELOPER_STATS)" for the #else/#endif below
// opens just before this block.
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg)                                \
  macro(KMP_fork_call, 0, arg)                                                 \
  macro(KMP_join_call, 0, arg)                                                 \
  macro(KMP_end_split_barrier, 0, arg)                                         \
  macro(KMP_hier_gather, 0, arg)                                               \
  macro(KMP_hier_release, 0, arg)                                              \
  macro(KMP_hyper_gather, 0, arg)                                              \
  macro(KMP_hyper_release, 0, arg)                                             \
  macro(KMP_linear_gather, 0, arg)                                             \
  macro(KMP_linear_release, 0, arg)                                            \
  macro(KMP_tree_gather, 0, arg)                                               \
  macro(KMP_tree_release, 0, arg)                                              \
  macro(USER_resume, 0, arg)                                                   \
  macro(USER_suspend, 0, arg)                                                  \
  macro(KMP_allocate_team, 0, arg)                                             \
  macro(KMP_setup_icv_copy, 0, arg)                                            \
  macro(USER_icv_copy, 0, arg)                                                 \
  macro (FOR_static_steal_stolen,                                              \
         stats_flags_e::noUnits | stats_flags_e::noTotal, arg)                 \
  macro (FOR_static_steal_chunks,                                              \
         stats_flags_e::noUnits | stats_flags_e::noTotal, arg)
#else
// Developer stats disabled: the developer-timer list expands to nothing.
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
#endif
// clang-format on

/*!
 * \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro.
 *
 * @param macro a user defined macro that takes three arguments -
 * macro(TIMER_NAME, flags, arg)
 * @param arg a user defined argument to send to the user defined macro
 *
 * \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE
 * BAD THINGS WILL HAPPEN!
 *
 * \details Explicit timers are ones where we need to allocate a timer itself
 * (as well as the accumulated timing statistics). We allocate these on a
 * per-thread basis, and explicitly start and stop them. Block timers just
 * allocate the timer itself on the stack, and use the destructor to notice
 * block exit; they don't need to be defined here. The name here should be the
 * same as that of a timer above.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg)

// Expand each FOREACH list into an enum, prefixing every name
// (e.g. TIMER_OMP_parallel), with a trailing *_LAST sentinel used as a count.
#define ENUMERATE(name, ignore, prefix) prefix##name,
enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST };

enum explicit_timer_e {
  KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST
};

enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST };
#undef ENUMERATE

/*
 * A logarithmic histogram. It accumulates the number of values in each power of
 * ten bin.  So 1<=x<10, 10<=x<100, ...
 * Mostly useful where we have some big outliers and want to see information
 * about them.
 */
class logHistogram {
  enum {
    numBins = 31, /* Number of powers of 10. If this changes you need to change
                   * the initializer for binMax */

    /*
     * If you want to use this to analyse values that may be less than 1, (for
     * instance times in s), then the logOffset gives you negative powers.
     * In our case here, we're just looking at times in ticks, or counts, so we
     * can never see values with magnitude < 1 (other than zero), so we can set
     * it to 0.  As above change the initializer if you change this.
     */
    logOffset = 0
  };
  // Count of exactly-zero samples; kept out of the bins (log10(0) undefined).
  uint32_t KMP_ALIGN_CACHE zeroCount;
  struct {
    uint32_t count; // number of samples that fell in this decade
    double total; // sum of those samples (gives a per-bin mean)
  } bins[numBins];

  static double binMax[numBins];

#ifdef KMP_DEBUG
  uint64_t _total;

  // Debug invariant: zeroCount plus all bin counts equals the running total.
  void check() const {
    uint64_t t = zeroCount;
    for (int i = 0; i < numBins; i++)
      t += bins[i].count;
    KMP_DEBUG_ASSERT(t == _total);
  }
#else
  void check() const {}
#endif

public:
  logHistogram() { reset(); }

  logHistogram(logHistogram const &o) {
    for (int i = 0; i < numBins; i++)
      bins[i] = o.bins[i];
#ifdef KMP_DEBUG
    _total = o._total;
#endif
  }

  // Zero every bin and the zero-sample count.
  void reset() {
    zeroCount = 0;
    for (int i = 0; i < numBins; i++) {
      bins[i].count = 0;
      bins[i].total = 0;
    }
#ifdef KMP_DEBUG
    _total = 0;
#endif
  }
  uint32_t count(int b) const { return bins[b + logOffset].count; }
  double total(int b) const { return bins[b + logOffset].total; }
  static uint32_t findBin(double sample);

  // Merge another histogram into this one (used to aggregate per-thread
  // histograms at exit).
  logHistogram &operator+=(logHistogram const &o) {
    zeroCount += o.zeroCount;
    for (int i = 0; i < numBins; i++) {
      bins[i].count += o.bins[i].count;
      bins[i].total += o.bins[i].total;
    }
#ifdef KMP_DEBUG
    _total += o._total;
    check();
#endif
    return *this;
  }

  void addSample(double sample);
  int minBin() const;
  int maxBin() const;

  std::string format(char) const;
};

// Running statistic: min/max/mean plus m2 (sum of squared deviations, i.e.
// Welford-style accumulation — SD is derived on demand), with an optional
// attached logHistogram.
class statistic {
  double KMP_ALIGN_CACHE minVal;
  double maxVal;
  double meanVal;
  double m2;
  uint64_t sampleCount;
  double offset;
  bool collectingHist;
  logHistogram hist;

public:
  statistic(bool doHist = bool(KMP_STATS_HIST)) {
    reset();
    collectingHist = doHist;
  }
  statistic(statistic const &o)
      : minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2),
        sampleCount(o.sampleCount), offset(o.offset),
        collectingHist(o.collectingHist), hist(o.hist) {}
  // Reconstruct a statistic from summary values (sd is converted back to m2).
  statistic(double minv, double maxv, double meanv, uint64_t sc, double sd)
      : minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc),
        sampleCount(sc), offset(0.0), collectingHist(false) {}
  bool haveHist() const { return collectingHist; }
  double getMin() const { return minVal; }
  double getMean() const { return meanVal; }
  double getMax() const { return maxVal; }
  uint64_t getCount() const { return sampleCount; }
  // NOTE(review): divides by sampleCount — NaN/odd result when no samples;
  // callers presumably check getCount() first.
  double getSD() const { return sqrt(m2 / sampleCount); }
  double getTotal() const { return sampleCount * meanVal; }
  logHistogram const *getHist() const { return &hist; }
  void setOffset(double d) { offset = d; }

  void reset() {
    minVal = std::numeric_limits<double>::max();
    maxVal = -minVal;
    meanVal = 0.0;
    m2 = 0.0;
    sampleCount = 0;
    offset = 0.0;
    hist.reset();
  }
  void addSample(double sample);
  void scale(double factor);
  void scaleDown(double f) { scale(1. / f); }
  void forceCount(uint64_t count) { sampleCount = count; }
  statistic &operator+=(statistic const &other);

  std::string format(char unit, bool total = false) const;
  std::string formatHist(char unit) const { return hist.format(unit); }
};

// Static metadata for a timer/counter: display name plus stats_flags_e bits.
struct statInfo {
  const char *name;
  uint32_t flags;
};

// A statistic for a timer, with static per-timer metadata lookups keyed by
// timer_e (name and flag queries are class-level, not per-instance).
class timeStat : public statistic {
  static statInfo timerInfo[];

public:
  timeStat() : statistic() {}
  static const char *name(timer_e e) { return timerInfo[e].name; }
  static bool noTotal(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::noTotal;
  }
  static bool masterOnly(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::onlyInMaster;
  }
  static bool workerOnly(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::notInMaster;
  }
  static bool noUnits(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::noUnits;
  }
  static bool logEvent(timer_e e) {
    return timerInfo[e].flags & stats_flags_e::logEvent;
  }
  // Strip the logEvent bit from every timer (done when event logging is off).
  static void clearEventFlags() {
    for (int i = 0; i < TIMER_LAST; i++) {
      timerInfo[i].flags &= (~(stats_flags_e::logEvent));
    }
  }
};

// Where we need explicitly to start and end the timer, this version can be
// used. Since these timers normally aren't nicely scoped, so don't have a good
// place to live on the stack of the thread, they're more work to use.
// A timer with explicit start()/stop() calls (as opposed to block-scoped
// timers); accumulates into the timeStat it was constructed with, and can be
// paused/resumed, with the paused span subtracted at stop time.
class explicitTimer {
  timeStat *stat; // destination statistic for the measured interval
  timer_e timerEnumValue; // which timer this is (for event logging)
  tsc_tick_count startTime;
  tsc_tick_count pauseStartTime;
  tsc_tick_count::tsc_interval_t totalPauseTime;

public:
  explicitTimer(timeStat *s, timer_e te)
      : stat(s), timerEnumValue(te), startTime(), pauseStartTime(0),
        totalPauseTime() {}

  // void setStat(timeStat *s) { stat = s; }
  void start(tsc_tick_count tick);
  // Record when the pause began; resume() adds the elapsed pause to the total.
  void pause(tsc_tick_count tick) { pauseStartTime = tick; }
  void resume(tsc_tick_count tick) {
    totalPauseTime += (tick - pauseStartTime);
  }
  void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr);
  void reset() {
    startTime = 0;
    pauseStartTime = 0;
    totalPauseTime = 0;
  }
  timer_e get_type() const { return timerEnumValue; }
};

// Where you need to partition a threads clock ticks into separate states
// e.g., a partitionedTimers class with two timers of EXECUTING_TASK, and
// DOING_NOTHING would render these conditions:
// time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive
// No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice
// versa
class partitionedTimers {
private:
  // Stack of currently-active timers; only the top one accumulates time.
  std::vector<explicitTimer> timer_stack;

public:
  partitionedTimers();
  void init(explicitTimer timer);
  void exchange(explicitTimer timer);
  void push(explicitTimer timer);
  void pop();
  void windup();
};

// Special wrapper around the partitioned timers to aid timing code blocks
// It avoids the need to have an explicit end, leaving the scope suffices.
// (RAII: the constructor pushes the timer, the destructor pops it.)
class blockPartitionedTimer {
  partitionedTimers *part_timers;

public:
  blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer)
      : part_timers(pt) {
    part_timers->push(timer);
  }
  ~blockPartitionedTimer() { part_timers->pop(); }
};

// Special wrapper around the thread state to aid in keeping state in code
// blocks It avoids the need to have an explicit end, leaving the scope
// suffices.
// Scoped thread-state setter: stores the previous state, installs the new one,
// and restores the old state when the scope exits.
class blockThreadState {
  stats_state_e *state_pointer;
  stats_state_e old_state;

public:
  blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state)
      : state_pointer(thread_state_pointer), old_state(*thread_state_pointer) {
    *state_pointer = new_state;
  }
  ~blockThreadState() { *state_pointer = old_state; }
};

// If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at
// program exit.
class counter {
  uint64_t value;
  static const statInfo counterInfo[];

public:
  counter() : value(0) {}
  void increment() { value++; }
  uint64_t getValue() const { return value; }
  void reset() { value = 0; }
  static const char *name(counter_e e) { return counterInfo[e].name; }
  static bool masterOnly(counter_e e) {
    return counterInfo[e].flags & stats_flags_e::onlyInMaster;
  }
};

/* ****************************************************************
    Class to implement an event

    There are four components to an event: start time, stop time
    nest_level, and timer_name.
    The start and stop time should be obvious (recorded in clock ticks).
    The nest_level relates to the bar width in the timeline graph.
    The timer_name is used to determine which timer event triggered this event.

    the interface to this class is through four read-only operations:
    1) getStart()     -- returns the start time as 64 bit integer
    2) getStop()      -- returns the stop time as 64 bit integer
    3) getNestLevel() -- returns the nest level of the event
    4) getTimerName() -- returns the timer name that triggered event

    *MORE ON NEST_LEVEL*
    The nest level is used in the bar graph that represents the timeline.
    Its main purpose is for showing how events are nested inside each other.
    For example, say events, A, B, and C are recorded.  If the timeline
    looks like this:

Begin -------------------------------------------------------------> Time
         |    |          |        |          |              |
         A    B          C        C          B              A
       start start      start    end        end            end

       Then A, B, C will have a nest level of 1, 2, 3 respectively.
       These values are then used to calculate the barwidth so you can
       see that inside A, B has occurred, and inside B, C has occurred.
       Currently, this is shown with A's bar width being larger than B's
       bar width, and B's bar width being larger than C's bar width.
**************************************************************** */
// Immutable record of one timed interval: [start, stop] in clock ticks, the
// nesting depth when it occurred, and the timer that produced it.
class kmp_stats_event {
  uint64_t start;
  uint64_t stop;
  int nest_level;
  timer_e timer_name;

public:
  kmp_stats_event()
      : start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {}
  kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme)
      : start(strt), stop(stp), nest_level(nst), timer_name(nme) {}
  inline uint64_t getStart() const { return start; }
  inline uint64_t getStop() const { return stop; }
  inline int getNestLevel() const { return nest_level; }
  inline timer_e getTimerName() const { return timer_name; }
};

/* ****************************************************************
    Class to implement a dynamically expandable array of events

    ---------------------------------------------------------
    | event 1 | event 2 | event 3 | event 4 | ... | event N |
    ---------------------------------------------------------

    An event is pushed onto the back of this array at every
    explicitTimer->stop() call.  The event records the thread #,
    start time, stop time, and nest level related to the bar width.

    The event vector starts at size INIT_SIZE and grows (doubles in size)
    if needed.  An implication of this behavior is that log(N)
    reallocations are needed (where N is number of events).  If you want
    to avoid reallocations, then set INIT_SIZE to a large value.

    the interface to this class is through six operations:
    1) reset() -- sets the internal_size back to 0 but does not deallocate any
       memory
    2) size() -- returns the number of valid elements in the vector
    3) push_back(start, stop, nest, timer_name) -- pushes an event onto
       the back of the array
    4) deallocate() -- frees all memory associated with the vector
    5) sort() -- sorts the vector by start time
    6) operator[index] or at(index) -- returns event reference at that index
**************************************************************** */
class kmp_stats_event_vector {
  kmp_stats_event *events; // backing storage, allocated via __kmp_allocate
  int internal_size; // number of valid events
  int allocated_size; // current capacity
  static const int INIT_SIZE = 1024;

public:
  kmp_stats_event_vector() {
    events =
        (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE);
    internal_size = 0;
    allocated_size = INIT_SIZE;
  }
  // NOTE: destructor intentionally does not free; deallocate() does that.
  ~kmp_stats_event_vector() {}
  inline void reset() { internal_size = 0; }
  inline int size() const { return internal_size; }

  // Append an event, doubling capacity first when full (copy to a fresh
  // allocation, then free the old one).
  void push_back(uint64_t start_time, uint64_t stop_time, int nest_level,
                 timer_e name) {
    int i;
    if (internal_size == allocated_size) {
      kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate(
          sizeof(kmp_stats_event) * allocated_size * 2);
      for (i = 0; i < internal_size; i++)
        tmp[i] = events[i];
      __kmp_free(events);
      events = tmp;
      allocated_size *= 2;
    }
    events[internal_size] =
        kmp_stats_event(start_time, stop_time, nest_level, name);
    internal_size++;
    return;
  }
  void deallocate();
  void sort();
  const kmp_stats_event &operator[](int index) const { return events[index]; }
  kmp_stats_event &operator[](int index) { return events[index]; }
  const kmp_stats_event &at(int index) const { return events[index]; }
  kmp_stats_event &at(int index) { return events[index]; }
};

/* ****************************************************************
    Class to implement a doubly-linked, circular, statistics list

    |---| ---> |---| ---> |---| ---> |---| ---> ... next
    |   |      |   |      |   |      |   |
    |---| <--- |---| <--- |---| <--- |---| <--- ...
                                                 prev
    Sentinel   first      second     third
    Node       node       node       node

    The Sentinel Node is the user handle on the list.
    The first node corresponds to thread 0's statistics.
    The second node corresponds to thread 1's statistics and so on...

    Each node has a _timers, _counters, and _explicitTimers array to hold that
    thread's statistics.  The _explicitTimers point to the correct _timer and
    update its statistics at every stop() call.  The explicitTimers' pointers
    are set up in the constructor.  Each node also has an event vector to hold
    that thread's timing events.  The event vector expands as necessary and
    records the start-stop times for each timer.

    The nestLevel variable is for plotting events and is related to the bar
    width in the timeline graph.

    Every thread will have a thread local pointer to its node in the list.
    The sentinel node is used by the master thread to store "dummy" statistics
    before __kmp_create_worker() is called.
**************************************************************** */
class kmp_stats_list {
  int gtid; // global thread id this node belongs to
  timeStat _timers[TIMER_LAST + 1];
  counter _counters[COUNTER_LAST + 1];
  explicitTimer thread_life_timer; // measures the thread's whole OpenMP life
  partitionedTimers _partitionedTimers;
  int _nestLevel; // one per thread
  kmp_stats_event_vector _event_vector;
  kmp_stats_list *next;
  kmp_stats_list *prev;
  stats_state_e state;
  int thread_is_idle_flag;

public:
  // A freshly constructed node is a self-linked (circular) singleton.
  kmp_stats_list()
      : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life],
                          TIMER_OMP_worker_thread_life),
        _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE),
        thread_is_idle_flag(0) {}
  ~kmp_stats_list() {}
  inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; }
  inline counter *getCounter(counter_e idx) { return &_counters[idx]; }
  inline partitionedTimers *getPartitionedTimers() {
    return &_partitionedTimers;
  }
  inline timeStat *getTimers() { return _timers; }
  inline counter *getCounters() { return _counters; }
  inline kmp_stats_event_vector &getEventVector() { return _event_vector; }
  inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); }
  inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); }
  inline void resetEventVector() { _event_vector.reset(); }
  inline void incrementNestValue() { _nestLevel++; }
  inline int getNestValue() { return _nestLevel; }
  inline void decrementNestValue() { _nestLevel--; }
  inline int getGtid() const { return gtid; }
  inline void setGtid(int newgtid) { gtid = newgtid; }
  inline void setState(stats_state_e newstate) { state = newstate; }
  inline stats_state_e getState() const { return state; }
  inline stats_state_e *getStatePointer() { return &state; }
  inline bool isIdle() { return thread_is_idle_flag == 1; }
  inline void setIdleFlag() { thread_is_idle_flag = 1; }
  inline void resetIdleFlag() { thread_is_idle_flag = 0; }
  kmp_stats_list *push_back(int gtid); // returns newly created list node
  inline void push_event(uint64_t start_time, uint64_t stop_time,
                         int nest_level, timer_e name) {
    _event_vector.push_back(start_time, stop_time, nest_level, name);
  }
  void deallocate();
  class iterator;
  kmp_stats_list::iterator begin();
  kmp_stats_list::iterator end();
  int size();

  // Forward iterator over the circular list (begin() presumably starts at the
  // first real node after the sentinel -- defined out of line).
  class iterator {
    kmp_stats_list *ptr;
    friend kmp_stats_list::iterator kmp_stats_list::begin();
    friend kmp_stats_list::iterator kmp_stats_list::end();

  public:
    iterator();
    ~iterator();
    iterator operator++();
    iterator operator++(int dummy);
    iterator operator--();
    iterator operator--(int dummy);
    bool operator!=(const iterator &rhs);
    bool operator==(const iterator &rhs);
    kmp_stats_list *operator*() const; // dereference operator
  };
};

/* ****************************************************************
    Class to encapsulate all output functions and the environment variables

    This module holds filenames for various outputs (normal stats, events,
    plot file), as well as coloring information for the plot file.

    The filenames and flags variables are read from environment variables.
    These are read once by the constructor of the global variable
    __kmp_stats_output which calls init().

    During this init() call, event flags for the timeStat::timerInfo[] global
    array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes).

    The only interface function that is public is outputStats(heading).  This
    function should print out everything it needs to, either to files or
    stderr, depending on the environment variables described below

    ENVIRONMENT VARIABLES:
    KMP_STATS_FILE -- if set, all statistics (not events) will be printed to
                      this file, otherwise, print to stderr
    KMP_STATS_THREADS -- if set to "on", then will print per thread statistics
                         to either KMP_STATS_FILE or stderr
    KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this
                           filename, otherwise, the plot file is sent to
                           "events.plt"
    KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log
                        events
    KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file,
                             otherwise, output is sent to "events.dat"
**************************************************************** */
class kmp_stats_output_module {

public:
  struct rgb_color {
    float r;
    float g;
    float b;
  };

private:
  std::string outputFileName;
  static const char *eventsFileName;
  static const char *plotFileName;
  static int printPerThreadFlag;
  static int printPerThreadEventsFlag;
  static const rgb_color globalColorArray[];
  static rgb_color timerColorInfo[];

  void init();
  static void setupEventColors();
  static void printPloticusFile();
  static void printHeaderInfo(FILE *statsOut);
  static void printTimerStats(FILE *statsOut, statistic const *theStats,
                              statistic const *totalStats);
  static void printCounterStats(FILE *statsOut, statistic const *theStats);
  static void printCounters(FILE *statsOut, counter const *theCounters);
  static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents,
                          int gtid);
  static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; }
  static void windupExplicitTimers();
  bool eventPrintingEnabled() const { return printPerThreadEventsFlag; }

public:
  kmp_stats_output_module() { init(); }
  void outputStats(const char *heading);
};

#ifdef __cplusplus
extern "C" {
#endif
void __kmp_stats_init();
void __kmp_stats_fini();
void __kmp_reset_stats();
void __kmp_output_stats(const char *);
void __kmp_accumulate_stats_at_exit(void);
// thread local pointer to stats node within list
extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr;
// head to stats list.
extern kmp_stats_list *__kmp_stats_list;
// lock for __kmp_stats_list
extern kmp_tas_lock_t __kmp_stats_lock;
// reference start time
extern tsc_tick_count __kmp_stats_start_time;
// interface to output
extern kmp_stats_output_module __kmp_stats_output;
#ifdef __cplusplus
}
#endif

// Simple, standard interfaces that drop out completely if stats aren't enabled

/*!
 * \brief Adds value to specified timer (name).
 *
 * @param name timer name as specified under the KMP_FOREACH_TIMER() macro
 * @param value double precision sample value to add to statistics for the
 * timer
 *
 * \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to
 * a timer statistics.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_COUNT_VALUE(name, value)                                           \
  __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value)

/*!
 * \brief Increments specified counter (name).
 *
 * @param name counter name as specified under the KMP_FOREACH_COUNTER() macro
 *
 * \details Use KMP_COUNT_BLOCK(name, value) macro to increment a statistics
 * counter for the executing thread.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_COUNT_BLOCK(name)                                                  \
  __kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment()

/*!
 * \brief Outputs the current thread statistics and reset them.
 *
 * @param heading_string heading put above the final stats output
 *
 * \details Explicitly stops all timers and outputs all stats. Environment
 * variable, `OMPTB_STATSFILE=filename`, can be used to output the stats to a
 * filename instead of stderr. Environment variable,
 * `OMPTB_STATSTHREADS=true|undefined`, can be used to output thread specific
 * stats. For now the `OMPTB_STATSTHREADS` environment variable can either be
 * defined with any value, which will print out thread specific stats, or it
 * can be undefined (not specified in the environment) and thread specific
 * stats won't be printed. It should be noted that all statistics are reset
 * when this macro is called.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string)

/*!
 * \brief Initializes the partitioned timers to begin with name.
 *
 * @param name timer which you want this thread to begin with
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_INIT_PARTITIONED_TIMERS(name)                                      \
  __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer(          \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))

// Time the current block by pushing a partitioned timer for its scope
// (popped automatically by blockPartitionedTimer's destructor).
#define KMP_TIME_PARTITIONED_BLOCK(name)                                       \
  blockPartitionedTimer __PBLOCKTIME__(                                        \
      __kmp_stats_thread_ptr->getPartitionedTimers(),                          \
      explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name),            \
                    TIMER_##name))

#define KMP_PUSH_PARTITIONED_TIMER(name)                                       \
  __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer(          \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))

#define KMP_POP_PARTITIONED_TIMER()                                            \
  __kmp_stats_thread_ptr->getPartitionedTimers()->pop()

#define KMP_EXCHANGE_PARTITIONED_TIMER(name)                                   \
  __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer(      \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))

#define KMP_SET_THREAD_STATE(state_name)                                       \
  __kmp_stats_thread_ptr->setState(state_name)

#define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState()

// Scoped thread-state change (restored at end of scope via blockThreadState).
#define KMP_SET_THREAD_STATE_BLOCK(state_name)                                 \
  blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \
                                    state_name)

/*!
 * \brief resets all stats (counters to 0, timers to 0 elapsed ticks)
 *
 * \details Reset all stats for all threads.
 *
 * @ingroup STATS_GATHERING
 */
#define KMP_RESET_STATS() __kmp_reset_stats()

#if (KMP_DEVELOPER_STATS)
// Developer stats enabled: developer macros forward to the regular ones.
#define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v)
#define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n)                            \
  KMP_EXCHANGE_PARTITIONED_TIMER(n)
#else
// Null definitions
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#endif

#else // KMP_STATS_ENABLED

// Stats gathering compiled out entirely: every macro becomes a no-op.
#define KMP_COUNT_VALUE(n, v) ((void)0)
#define KMP_COUNT_BLOCK(n) ((void)0)
#define KMP_OUTPUT_STATS(heading_string) ((void)0)
#define KMP_RESET_STATS() ((void)0)

#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0)
#define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0)
#define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0)
#define KMP_POP_PARTITIONED_TIMER() ((void)0)
#define KMP_SET_THREAD_STATE(state_name) ((void)0)
#define KMP_GET_THREAD_STATE() ((void)0)
#define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0)

#endif // KMP_STATS_ENABLED

#endif // KMP_STATS_H
//==============================================================================
// File: GB_binop__lxor_int16.c
//==============================================================================
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lxor_int16 // A.*B function (eWiseMult): GB_AemultB__lxor_int16 // A*D function (colscale): GB_AxD__lxor_int16 // D*A function (rowscale): GB_DxB__lxor_int16 // C+=B function (dense accum): GB_Cdense_accumB__lxor_int16 // C+=b function (dense accum): GB_Cdense_accumb__lxor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_int16 // C=scalar+B GB_bind1st__lxor_int16 // C=scalar+B' GB_bind1st_tran__lxor_int16 // C=A+scalar GB_bind2nd__lxor_int16 // C=A'+scalar GB_bind2nd_tran__lxor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: logical xor of the truth values of x and y
#define GB_BINOP(z, x, y) \
    z = ((x != 0) != (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// No dense-accum kernel is generated for LXOR, hence the (none) placeholder
// under #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // returns GrB_NO_VALUE if this operator/type is compile-time disabled,
    // telling the caller to fall back to the generic kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slice descriptors partition B's entries across the parallel tasks
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lxor_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated code): the block above already returned
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lxor_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lxor_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    // Cx [p] = lxor (x, Bx [p]) for all anz entries, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lxor_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    // Cx [p] = lxor (Ax [p], y) for all anz entries, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)
\ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lxor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lxor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pmv-OpenMP-b.c
/* Multiplica una matriz por un vector */ #include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free() #include <stdio.h> // biblioteca donde se encuentra la función printf() #include <time.h> // biblioteca donde se encuentra la función clock_gettime() #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif //#define PRINTF_ALL// comentar para quitar el printf ... // que imprime todos los componentes int main(int argc, char** argv){ int i, j; double cgt1,cgt2; double ncgt; //para tiempo de ejecución //Leer argumento de entrada (nº de componentes del vector) if (argc<2){ printf("Falta tamaño\n"); exit(-1); } unsigned int N = atoi(argv[1]); double *v1; double *v2; double **M; v1 = (double*) malloc(N*sizeof(double));// malloc necesita el tamaño en bytes v2 = (double*) malloc(N*sizeof(double)); M = (double**) malloc(N*sizeof(double*)); for(i = 0; i < N; i++) M[i] = (double*) malloc(N*sizeof(double)); for(i=0; i<N; i++){ v1[i] = N*0.1+i*0.1; for(j=0; j<N; j++) M[i][j] = N*0.1-j*0.1; } double suma, sumalocal; cgt1 = omp_get_wtime(); for(i = 0; i < N; i++){ suma = 0; #pragma omp parallel private(sumalocal) { sumalocal = 0; #pragma omp for for(j = 0; j < N; j++) sumalocal += M[i][j]*v1[j]; #pragma omp atomic suma += sumalocal; } v2[i] = suma; } cgt2 = omp_get_wtime(); ncgt = cgt2 - cgt1; //Imprimir resultado y el tiempo de ejecución #ifdef PRINTF_ALL printf("Tiempo(seg.):%11.9f\t / Tamaño:%u\n",ncgt,N); printf("Resulado:\n"); for(i=0; i<N; i++) printf("V[%d] = %8.6f /",i,v2[i]); printf("\n"); #else printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ V[0]=%8.6f / / V[%d]=%8.6f /\n",ncgt,N,v2[0],N-1,v2[N-1]); #endif free(v1); // libera el espacio reservado para v1 free(v2); // libera el espacio reservado para v2 for(i = 0; i < N; i++) free(M[i]); free(M); return 0; }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unop__identity_fp32_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_int8)
// op(A') function:  GB (_unop_tran__identity_fp32_int8)

// C type:   float
// A type:   int8_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

// The macros below parameterize the shared templates (GB_unop_transpose.c)
// for this specific type pair: identity op, int8_t input cast to float.

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (float) Ax [p] for all anz entries.  The two loops below
// differ only in the bitmap test: when Ab is NULL the input is full/sparse
// and every p is processed; otherwise only entries with Ab [p] set.

GrB_Info GB (_unop_apply__identity_fp32_int8)
(
    float *Cx,              // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads            // number of OpenMP threads for the loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is generated by including the shared transpose template, which
// expands in terms of the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_fp32_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_unop__ceil_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ceil_fp32_fp32
// op(A') function:  GB_unop_tran__ceil_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = ceilf (aij)

// The macros below parameterize the shared templates (GB_unop_transpose.c)
// for this specific case: ceilf applied to float, no typecast.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ceilf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = ceilf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CEIL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = ceilf (Ax [p]) for all anz entries.  When Ab is NULL the
// input is full/sparse and every p is processed; otherwise only entries with
// the bitmap bit Ab [p] set.  (The GB_memcpy fast path below is compiled out
// here since GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for ceil.)

GrB_Info GB_unop_apply__ceil_fp32_fp32
(
    float *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads            // number of OpenMP threads for the loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = ceilf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = ceilf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is generated by including the shared transpose template, which
// expands in terms of the GB_* macros defined above.

GrB_Info GB_unop_tran__ceil_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
taskyield_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */

/*
 * A bunch of n tasks (1st arg) are created by a single thread.
 * Each task creates two tasks more and inside the second one
 * a taskyield directive is called
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 50000
#define NUM_REPS 1

int o = 0;
int pp = 0;

/* Side-effect sink: counts how many second-child tasks ran. */
void na(float value) { o++; }

/* Scale *a by value. */
void sscal(float value, float *a) { *a = *a * value; }

/* Spawns two child tasks: one scales the element, the other bumps the
 * counter and then hits the taskyield construct under test. */
void presscal(float value, float *a) {
#pragma omp task
    { sscal(value, a); }
#pragma omp task
    {
        na(value);
#pragma omp taskyield
    }
}

int main(int argc, char *argv[]) {
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str = NULL, *endptr;
    float *a;
    double time2 = 0.0;

    /* Query the team size once; master writes the shared variable. */
#pragma omp parallel
    {
#pragma omp master
        { nthreads = omp_get_num_threads(); }
    }

    if (argc > 1) {
        str = argv[1];
    }
    /* NOTE: the parsed value is narrowed to int; task counts beyond
     * INT_MAX are not supported by the int loop counters below. */
    int ntasks = argc > 1 ? (int)strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    if (rep < 1)          /* was: malloc(0) and division by zero below */
        rep = NUM_REPS;

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    if (time == NULL || a == NULL) {   /* was unchecked: NULL deref on OOM */
        fprintf(stderr, "allocation failed\n");
        return EXIT_FAILURE;
    }

    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                /* time2 measures task *creation* only; the implicit barrier
                 * at the end of the parallel region waits for completion. */
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    { presscal(0.9f, &a[i]); }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    /* Only one repetition scales each element, so expect one 0.9 factor. */
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i],
                   (i + 100.0f) * 0.9f);
        }
    }

    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    printf("o=%d and it should be %d\n", o, ntasks);
    /* NOTE(review): pp is never incremented anywhere in this file, so this
     * always prints 0; the "should be" claim looks stale — confirm against
     * the upstream benchmark before relying on it. */
    printf("pp=%d and it should be %d\n", pp, ntasks);

    free(a);     /* was leaked */
    free(time);  /* was leaked */
    return EXIT_SUCCESS;
}
NETLMv2_fmt_plug.c
/* * NETLMv2_fmt.c -- LMv2 Challenge/Response * * Written by JoMo-Kun <jmk at foofus.net> in 2008 * and placed in the public domain. * * Performance fixes, OMP and utf-8 support by magnum 2010-2011 * * This algorithm is designed for performing brute-force cracking of the LMv2 * challenge/response sets exchanged during network-based authentication * attempts [1]. The captured challenge/response set from these attempts * should be stored using the following format: * * USERNAME::DOMAIN:SERVER CHALLENGE:LMv2 RESPONSE:CLIENT CHALLENGE * * For example: * Administrator::WORKGROUP:1122334455667788:6759A5A7EFB25452911DE7DE8296A0D8:F503236B200A5B3A * * It should be noted that a LMv2 authentication response is not same as a LM * password hash, which can be extracted using tools such as FgDump [2]. In * fact, a NTLM hash and not a LM hash is used within the LMv2 algorithm. LMv2 * challenge/response authentication typically takes place when the GPO * "Network Security: LAN Manager authentication level" is configured to a setting * that enforces the use of NTLMv2, such as "Send NTLMv2 response only\refuse * LM & NTLM." * * LMv2 responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). 
* * [1] http://davenport.sourceforge.net/ntlm.html#theLmv2Response * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETLMv2; #elif FMT_REGISTERS_H john_register_one(&fmt_NETLMv2); #else #include <stdint.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "md5.h" #include "hmacmd5.h" #include "byteorder.h" #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "netlmv2" #define FORMAT_NAME "LMv2 C/R" #define FORMAT_TAG "$NETLMv2$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD4 HMAC-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? */ #define USERNAME_LENGTH 60 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */ #define DOMAIN_LENGTH 45 /* lmcons.h - CNLEN / DNLEN */ #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define CHALLENGE_LENGTH 32 #define SALT_SIZE 16 + 1 + 2 * (USERNAME_LENGTH + DOMAIN_LENGTH) + 1 #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH 32 #define TOTAL_LENGTH 12 + USERNAME_LENGTH + DOMAIN_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH // these may be altered in init() if running OMP #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #ifndef OMP_SCALE #define OMP_SCALE 1536 #endif static struct fmt_tests tests[] = { {"", "1337adminPASS", {"FOODOM\\Administrator", "", "", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} }, {"$NETLMv2$ADMINISTRATORFOODOM$1122334455667788$6F64C5C1E35F68DD80388C0F00F34406$F0F3FF27037AA69F", "1337adminPASS"}, {"$NETLMv2$USER1$1122334455667788$B1D163EA5881504F3963DC50FCDC26C1$EB4D9E8138149E20", "foobar"}, // repeat in exactly the same format 
that is used in john.pot (lower case hex) {"$NETLMv2$USER1$1122334455667788$b1d163ea5881504f3963dc50fcdc26c1$eb4d9e8138149e20", "foobar"}, {"$NETLMv2$ATEST$1122334455667788$83B59F1536D3321DBF1FAEC14ADB1675$A1E7281FE8C10E53", "SomeFancyP4$$w0rdHere"}, {"", "1337adminPASS", {"administrator", "", "FOODOM", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} }, {"", "foobar", {"user1", "", "", "1122334455667788", "B1D163EA5881504F3963DC50FCDC26C1", "EB4D9E8138149E20"} }, {"", "SomeFancyP4$$w0rdHere", {"aTest", "", "", "1122334455667788", "83B59F1536D3321DBF1FAEC14ADB1675", "A1E7281FE8C10E53"} }, {NULL} }; static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static uchar (*output)[BINARY_SIZE]; static HMACMD5Context (*saved_ctx); static int keys_prepared; static unsigned char *challenge; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output)); saved_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_ctx)); } static void done(void) { MEM_FREE(saved_ctx); MEM_FREE(output); MEM_FREE(saved_len); MEM_FREE(saved_plain); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *pos2; if (ciphertext == NULL) return 0; else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; pos = &ciphertext[FORMAT_TAG_LEN]; /* Validate Username and Domain Length */ for (pos2 = pos; *pos2 != '$'; pos2++) if ((unsigned char)*pos2 < 0x20) return 0; if ( !(*pos2 && (pos2 - pos <= USERNAME_LENGTH + DOMAIN_LENGTH)) ) return 0; /* Validate Server Challenge Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] 
== 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) ) return 0; /* Validate LMv2 Response Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0; /* Validate Client Challenge Length */ pos2++; pos = pos2; for (; atoi16[ARCH_INDEX(*pos2)] != 0x7F; pos2++); if (pos2 - pos != CHALLENGE_LENGTH / 2) return 0; if (pos2[0] != '\0') return 0; return 1; } static char *prepare(char *split_fields[10], struct fmt_main *self) { char *login = split_fields[0]; char *uid = split_fields[2]; char *srv_challenge = split_fields[3]; char *nethashv2 = split_fields[4]; char *cli_challenge = split_fields[5]; char *identity = NULL, *tmp; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) return split_fields[1]; if (!login || !uid || !srv_challenge || !nethashv2 || !cli_challenge) return split_fields[1]; /* DOMAIN\USER: -or- USER::DOMAIN: */ if ((tmp = strstr(login, "\\")) != NULL) { identity = (char *) mem_alloc(strlen(login)*2 + 1); strcpy(identity, tmp + 1); /* Upper-Case Username - Not Domain */ enc_strupper(identity); strncat(identity, login, tmp - login); } else { identity = (char *) mem_alloc(strlen(login)*2 + strlen(uid) + 1); strcpy(identity, login); enc_strupper(identity); strcat(identity, uid); } tmp = (char *) mem_alloc(9 + strlen(identity) + 1 + strlen(srv_challenge) + 1 + strlen(nethashv2) + 1 + strlen(cli_challenge) + 1); sprintf(tmp, "%s%s$%s$%s$%s", FORMAT_TAG, identity, srv_challenge, nethashv2, cli_challenge); MEM_FREE(identity); if (valid(tmp, self)) { char *cp = str_alloc_copy(tmp); MEM_FREE(tmp); return cp; } MEM_FREE(tmp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TOTAL_LENGTH + 1]; char *pos = NULL; int identity_length = 0; /* Calculate identity length */ for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + 
FORMAT_TAG_LEN); memset(out, 0, TOTAL_LENGTH + 1); memcpy(out, ciphertext, strlen(ciphertext)); strlwr(&out[FORMAT_TAG_LEN + identity_length + 1]); /* Exclude: $NETLMv2$USERDOMAIN$ */ return out; } static void *get_binary(char *ciphertext) { static uchar *binary; char *pos = NULL; int i, identity_length; if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); ciphertext += FORMAT_TAG_LEN + identity_length + 1 + CHALLENGE_LENGTH / 2 + 1; for (i=0; i<BINARY_SIZE; i++) { binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } return binary; } /* Calculate the LMv2 response for the given challenge, using the specified authentication identity (username and domain), password and client nonce. */ static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i = 0; #ifdef _OPENMP #pragma omp parallel for for (i = 0; i < count; i++) #endif { unsigned char ntlm_v2_hash[16]; HMACMD5Context ctx; // can't be moved above the OMP pragma if (!keys_prepared) { int len; unsigned char ntlm[16]; /* Generate 16-byte NTLM hash */ len = E_md4hash(saved_plain[i], saved_len[i], ntlm); // We do key setup of the next HMAC_MD5 here (once per salt) hmac_md5_init_K16(ntlm, &saved_ctx[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation } /* HMAC-MD5(Username + Domain, NTLM Hash) */ memcpy(&ctx, &saved_ctx[i], sizeof(ctx)); hmac_md5_update(&challenge[17], (int)challenge[16], &ctx); hmac_md5_final(ntlm_v2_hash, &ctx); /* Generate 16-byte non-client nonce portion of LMv2 Response */ /* HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) + Nonce */ hmac_md5(ntlm_v2_hash, challenge, 16, (unsigned char*)output[i]); } keys_prepared = 1; return count; } static int cmp_all(void *binary, int count) { int index; for (index=0; index<count; index++) if (!memcmp(output[index], binary, BINARY_SIZE)) 
return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(output[index], binary, BINARY_SIZE); } static int cmp_exact(char *source, int index) { return !memcmp(output[index], get_binary(source), BINARY_SIZE); } /* We're essentially using three salts, but we're going to pack it into a single blob for now. |Client Challenge (8 Bytes)|Server Challenge (8 Bytes)|Unicode(Username (<=20).Domain (<=15)) */ static void *get_salt(char *ciphertext) { static unsigned char *binary_salt; unsigned char identity[USERNAME_LENGTH + DOMAIN_LENGTH + 1]; UTF16 identity_ucs2[USERNAME_LENGTH + DOMAIN_LENGTH + 1]; int i, identity_length; int identity_ucs2_length; char *pos = NULL; if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); memset(binary_salt, 0, SALT_SIZE); /* Calculate identity length */ for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); /* Convert identity (username + domain) string to NT unicode */ strnzcpy((char *)identity, ciphertext + FORMAT_TAG_LEN, sizeof(identity)); identity_ucs2_length = enc_to_utf16((UTF16 *)identity_ucs2, USERNAME_LENGTH + DOMAIN_LENGTH, (UTF8 *)identity, identity_length) * sizeof(int16_t); if (identity_ucs2_length < 0) // Truncated at Unicode conversion. 
identity_ucs2_length = strlen16((UTF16 *)identity_ucs2) * sizeof(int16_t); binary_salt[16] = (unsigned char)identity_ucs2_length; memcpy(&binary_salt[17], (char *)identity_ucs2, identity_ucs2_length); /* Set server challenge */ ciphertext += FORMAT_TAG_LEN + identity_length + 1; for (i = 0; i < 8; i++) binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; /* Set client challenge */ ciphertext += 2 + CHALLENGE_LENGTH / 2 + CIPHERTEXT_LENGTH; for (i = 0; i < 8; ++i) binary_salt[i + 8] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; /* Return a concatenation of the server and client challenges and the identity value */ return (void*)binary_salt; } static void set_salt(void *salt) { challenge = salt; } static void set_key(char *key, int index) { saved_len[index] = strlen(key); memcpy((char *)saved_plain[index], key, saved_len[index] + 1); keys_prepared = 0; } static char *get_key(int index) { return (char *)saved_plain[index]; } static int salt_hash(void *salt) { // Hash the client challenge (in case server salt was spoofed) return (*(uint32_t *)salt+8) & (SALT_HASH_SIZE - 1); } static int get_hash_0(int index) { return *(uint32_t *)output[index] & PH_MASK_0; } static int get_hash_1(int index) { return *(uint32_t *)output[index] & PH_MASK_1; } static int get_hash_2(int index) { return *(uint32_t *)output[index] & PH_MASK_2; } static int get_hash_3(int index) { return *(uint32_t *)output[index] & PH_MASK_3; } static int get_hash_4(int index) { return *(uint32_t *)output[index] & PH_MASK_4; } static int get_hash_5(int index) { return *(uint32_t *)output[index] & PH_MASK_5; } static int get_hash_6(int index) { return *(uint32_t *)output[index] & PH_MASK_6; } struct fmt_main fmt_NETLMv2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE 
| FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
/* ==== privatemissing-orig-yes.c ==== */
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/*
 * DataRaceBench kernel "privatemissing" (orig-yes variant).
 *
 * The scalar `tmp` is shared by default across the OpenMP threads, so the
 * concurrent writes "tmp = a[i]+i" form a deliberate data race: the race-free
 * variant would declare it private (#pragma omp parallel for private(tmp)).
 *
 * NOTE(review): the race is intentional benchmark content ("-yes" = race
 * present) -- do not "fix" it here; data-race detectors are expected to
 * flag the write to tmp.
 */
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
	int i;
	int tmp;        /* shared across all threads -> the seeded race */
	int len=100;
	int a[100];

	/* initialize a[i] = i */
	for (i=0;i<len;i++)
		a[i]=i;

	/* racy region: every thread reads and writes the shared tmp */
#pragma omp parallel for
	for (i=0;i<len;i++)
	{
		tmp =a[i]+i;
		a[i] = tmp;
	}

	printf("a[50]=%d\n", a[50]);
	return 0;
}
/* ==== GB_binop__div_uint8.c ==== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8) // A*D function (colscale): GB (_AxD__div_uint8) // D*A function (rowscale): GB (_DxB__div_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8) // C=scalar+B GB (_bind1st__div_uint8) // C=scalar+B' GB (_bind1st_tran__div_uint8) // C=A+scalar GB (_bind2nd__div_uint8) // C=A'+scalar GB (_bind2nd_tran__div_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if 
the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__div_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__div_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \ } GrB_Info GB (_bind1st_tran__div_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \ } GrB_Info GB (_bind2nd_tran__div_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ==== GB_unop__tanh_fp32_fp32.c ==== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change belongs in the Generator/ template that emits this
// file, not in this translation unit.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__tanh_fp32_fp32
// op(A') function: GB_unop_tran__tanh_fp32_fp32

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = tanhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = tanhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = tanhf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TANH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = tanhf(x) elementwise over the anz entries of Ax, writing the
// results into Cx (which may alias Ax).  Ab, when non-NULL, is A's bitmap:
// positions with Ab[p]==0 hold no entry and are skipped.
GrB_Info GB_unop_apply__tanh_fp32_fp32
(
    float *Cx,                      // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // non-bitmap case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanhf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Out-of-place C = tanh(A'): the transpose loop lives in the shared template
// GB_unop_transpose.c, which expands the GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__tanh_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== MolecularMechanics.c ==== */
/* MolecularMechanics.c */ /********************************************************************************************************** Copyright (c) 2002-2013 Abdul-Rahman Allouche. All rights reserved Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Gabedit), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
************************************************************************************************************/ #include "../../Config.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #ifdef ENABLE_OMP #include <omp.h> #endif #include "../Common/Global.h" #include "../Utils/AtomsProp.h" #include "../Utils/Utils.h" #include "../Utils/Constants.h" #include "../Geometry/Fragments.h" #include "../Geometry/DrawGeom.h" #include "Atom.h" #include "Molecule.h" #include "ForceField.h" #include "MolecularMechanics.h" #include "LoadMMParameters.h" #include "CreatePersonalMMFile.h" #include "CreateMolecularMechanicsFile.h" static AmberParameters* staticAmberParameters = NULL; /* static void calculateGradientNumericAmber(ForceField* forceField);*/ static void calculateGradientAmber(ForceField* forceField); static void calculateEnergyAmber(ForceField* forceField); static gdouble calculateEnergyTmpAmber(ForceField* forceField,Molecule* m); /**********************************************************************/ AmberParameters newAmberParameters() { AmberParameters amberParameters; amberParameters.numberOfTypes = 0; amberParameters.atomTypes = NULL; amberParameters.numberOfStretchTerms = 0; amberParameters.bondStretchTerms = NULL; amberParameters.numberOfBendTerms = 0; amberParameters.angleBendTerms = NULL; amberParameters.numberOfDihedralTerms = 0; amberParameters.dihedralAngleTerms = NULL; amberParameters.numberOfImproperTorsionTerms = 0; amberParameters.improperTorsionTerms = NULL; amberParameters.numberOfNonBonded = 0; amberParameters.nonBondedTerms = NULL; amberParameters.numberOfHydrogenBonded = 0; amberParameters.hydrogenBondedTerms = NULL; amberParameters.numberOfPairWise = 0; amberParameters.pairWiseTerms = NULL; return amberParameters; } /**********************************************************************/ static void freeAmberParameters(AmberParameters* amberParameters) { gint i; for(i=0;i<amberParameters->numberOfTypes;i++) 
if(amberParameters->atomTypes[i].name) g_free(amberParameters->atomTypes[i].name); for(i=0;i<amberParameters->numberOfTypes;i++) if(amberParameters->atomTypes[i].name) g_free(amberParameters->atomTypes[i].description); amberParameters->numberOfTypes = 0; if(amberParameters->atomTypes ) g_free(amberParameters->atomTypes ); amberParameters->atomTypes = NULL; amberParameters->numberOfStretchTerms = 0; if(amberParameters->bondStretchTerms) g_free(amberParameters->bondStretchTerms); amberParameters->bondStretchTerms = NULL; amberParameters->numberOfBendTerms = 0; if(amberParameters->angleBendTerms) g_free(amberParameters->angleBendTerms); amberParameters->angleBendTerms = NULL; for(i=0;i<amberParameters->numberOfDihedralTerms;i++) { if(amberParameters->dihedralAngleTerms[i].divisor) g_free(amberParameters->dihedralAngleTerms[i].divisor); if(amberParameters->dihedralAngleTerms[i].barrier) g_free(amberParameters->dihedralAngleTerms[i].barrier); if(amberParameters->dihedralAngleTerms[i].phase) g_free(amberParameters->dihedralAngleTerms[i].phase); if(amberParameters->dihedralAngleTerms[i].n) g_free(amberParameters->dihedralAngleTerms[i].n); } amberParameters->numberOfDihedralTerms = 0; if(amberParameters->dihedralAngleTerms) g_free(amberParameters->dihedralAngleTerms); amberParameters->dihedralAngleTerms = NULL; amberParameters->numberOfImproperTorsionTerms = 0; if(amberParameters->improperTorsionTerms) g_free(amberParameters->improperTorsionTerms); amberParameters->improperTorsionTerms = NULL; amberParameters->numberOfNonBonded = 0; if(amberParameters->nonBondedTerms) g_free(amberParameters->nonBondedTerms); amberParameters->nonBondedTerms = NULL; amberParameters->numberOfHydrogenBonded = 0; if(amberParameters->hydrogenBondedTerms) g_free(amberParameters->hydrogenBondedTerms); amberParameters->hydrogenBondedTerms = NULL; } /**********************************************************************/ static gint getNumberType(AmberParameters* amberParameters, gchar* type) { gint 
i; gint nTypes = amberParameters->numberOfTypes; AmberAtomTypes* types = amberParameters->atomTypes; gint len = strlen(type); if(strcmp(type,"X")==0) return -1; for(i=0;i<nTypes;i++) { if(len == (gint)strlen(types[i].name) && strstr(types[i].name,type)) return types[i].number; } return -2; } /**********************************************************************/ static ForceField newAmberModel() { ForceField forceField = newForceField(); forceField.klass->calculateGradient = calculateGradientAmber; /*forceField.klass->calculateGradient = calculateGradientNumericAmber;*/ forceField.klass->calculateEnergy = calculateEnergyAmber; forceField.klass->calculateEnergyTmp = calculateEnergyTmpAmber; forceField.options.type = AMBER; forceField.options.coulomb = TRUE; forceField.options.hydrogenBonded = TRUE; forceField.options.improperTorsion = TRUE; return forceField; } /**********************************************************************/ static ForceField newPairWiseModel() { ForceField forceField = newForceField(); forceField.klass->calculateGradient = calculateGradientAmber; forceField.klass->calculateEnergy = calculateEnergyAmber; forceField.klass->calculateEnergyTmp = calculateEnergyTmpAmber; forceField.options.type = PAIRWISE; forceField.options.coulomb = TRUE; forceField.options.vanderWals = TRUE; forceField.options.bondStretch = FALSE; forceField.options.angleBend = FALSE; forceField.options.dihedralAngle = FALSE; forceField.options.improperTorsion = FALSE; forceField.options.nonBonded = FALSE; forceField.options.hydrogenBonded = FALSE; return forceField; } /**********************************************************************/ static gboolean isIonic(gchar* mmType) { if(!strcmp(mmType,"Li")) return TRUE; if(!strcmp(mmType,"Na")) return TRUE; if(!strcmp(mmType,"K")) return TRUE; if(!strcmp(mmType,"Rb")) return TRUE; if(!strcmp(mmType,"Cs")) return TRUE; if(!strcmp(mmType,"Ca")) return TRUE; if(!strcmp(mmType,"Sr")) return TRUE; if(!strcmp(mmType,"Ba")) return 
TRUE; if(!strcmp(mmType,"Zn")) return TRUE; if(!strcmp(mmType,"IB")) return TRUE; if(!strcmp(mmType,"Cl")) return TRUE; return FALSE; } /**********************************************************************/ static gboolean getStretchParameters( AmberParameters* amberParameters, gint a1Type, gint a2Type, gdouble* forceConstant,gdouble* equilibriumDistance) { gint i; forceConstant[0] = 0.0; equilibriumDistance[0] = 0.0; if(a1Type>a2Type) { gint t; t = a1Type; a1Type = a2Type; a2Type = t; } for(i=0;i<amberParameters->numberOfStretchTerms;i++) { if( a1Type == amberParameters->bondStretchTerms[i].numbers[0] && a2Type == amberParameters->bondStretchTerms[i].numbers[1] ) { forceConstant[0] = amberParameters->bondStretchTerms[i].forceConstant; equilibriumDistance[0] = amberParameters->bondStretchTerms[i].equilibriumDistance; return TRUE; } } return FALSE; } /**********************************************************************/ static gboolean getBendParameters(AmberParameters* amberParameters,gint a1Type, gint a2Type, gint a3Type, gdouble* forceConstant, gdouble* equilibriumAngle) { gint i; forceConstant[0] = 0.0; equilibriumAngle[0] = 0.0; if(a1Type>a3Type) { gint t; t = a1Type; a1Type = a3Type; a3Type = t; } for(i=0;i<amberParameters->numberOfBendTerms;i++) { if( a1Type == amberParameters->angleBendTerms[i].numbers[0] && a2Type == amberParameters->angleBendTerms[i].numbers[1] && a3Type == amberParameters->angleBendTerms[i].numbers[2] ) { forceConstant[0] = amberParameters->angleBendTerms[i].forceConstant; equilibriumAngle[0] = amberParameters->angleBendTerms[i].equilibriumAngle; return TRUE; } } return FALSE; } /**********************************************************************/ static gboolean getHydrogenBondedParameters(AmberParameters* amberParameters, gint a1Type, gint a2Type, gdouble c[], gdouble d[] ) { gint i; AmberAtomTypes* types = amberParameters->atomTypes; c[0] = 0.0; d[0] = 0.0; if(types[a2Type].name[0]=='H') { gint t = a1Type; a1Type = a2Type; 
a2Type = t; } for(i=0;i<amberParameters->numberOfHydrogenBonded;i++) { if( a1Type == amberParameters->hydrogenBondedTerms[i].numbers[0] && a2Type == amberParameters->hydrogenBondedTerms[i].numbers[1] ) { c[0] = amberParameters->hydrogenBondedTerms[i].c; d[0] = amberParameters->hydrogenBondedTerms[i].d; return TRUE; } } return FALSE; } /**********************************************************************/ static gboolean getNonBondedParameters(AmberParameters* amberParameters, gint atomType, gdouble* r, gdouble* epsilon ) { gint i; r[0] = 1.0; epsilon[0] = 0.0; for(i=0;i<amberParameters->numberOfNonBonded;i++) { if( atomType == amberParameters->nonBondedTerms[i].number ) { r[0] = amberParameters->nonBondedTerms[i].r; epsilon[0] = amberParameters->nonBondedTerms[i].epsilon; /*printf("r = %f eps = %f\n",r[0],epsilon[0]);*/ return TRUE; } } return FALSE; } /**********************************************************************/ static gboolean getPairWiseParameters(AmberParameters* amberParameters, gint a1Type, gint a2Type, gdouble* a, gdouble* beta, gdouble* c6, gdouble* c8, gdouble* c10, gdouble* b) { gint i; a[0] = 0.0; beta[0] = 1.0; c6[0] = 0.0; c8[0] = 0.0; c10[0] = 0.0; b[0] = 1.0; for(i=0;i<amberParameters->numberOfPairWise;i++) { if( ( a1Type == amberParameters->pairWiseTerms[i].numbers[0] && a2Type == amberParameters->pairWiseTerms[i].numbers[1] ) || ( a1Type == amberParameters->pairWiseTerms[i].numbers[1] && a2Type == amberParameters->pairWiseTerms[i].numbers[0] ) ) { a[0] = amberParameters->pairWiseTerms[i].a; beta[0] = amberParameters->pairWiseTerms[i].beta; c6[0] = amberParameters->pairWiseTerms[i].c6; c8[0] = amberParameters->pairWiseTerms[i].c8; c10[0] = amberParameters->pairWiseTerms[i].c10; b[0] = amberParameters->pairWiseTerms[i].b; return TRUE; } } return FALSE; } /**********************************************************************/ static gboolean getImproperTorsionParameters( AmberParameters* amberParameters, gint a1Type, gint a2Type, gint 
a3Type, gint a4Type, gdouble* forceConstant, gdouble* equilibriumAngle, gdouble* terms )
{
	gint i;
	forceConstant[0] = 0.0;
	equilibriumAngle[0] = 0.0;
	terms[0] = 0.0;
	/* normalize: order the outer types, mirroring the inner pair with them */
	if(a1Type>a4Type)
	{
		gint t;
		t = a1Type;
		a1Type = a4Type;
		a4Type = t;
		t = a2Type;
		a2Type = a3Type;
		a3Type = t;
	}
	for(i=0;i<amberParameters->numberOfImproperTorsionTerms;i++)
	{
		if( a1Type == amberParameters->improperTorsionTerms[i].numbers[0] && a2Type == amberParameters->improperTorsionTerms[i].numbers[1] && a3Type == amberParameters->improperTorsionTerms[i].numbers[2] && a4Type == amberParameters->improperTorsionTerms[i].numbers[3] )
		{
			forceConstant[0] = amberParameters->improperTorsionTerms[i].barrier;
			equilibriumAngle[0] = amberParameters->improperTorsionTerms[i].phase;
			terms[0] = amberParameters->improperTorsionTerms[i].n;
			return TRUE;
		}
	}
	return FALSE;
}
/**********************************************************************/
/* Find the dihedral table entry matching the four types, forward or
 * reversed; exact matches are preferred over -1 wildcard matches.
 * On success *n receives the table index and nSomme (the number of
 * Fourier components) is returned; returns 0 when nothing matches. */
static gint getNumberDihedralParameters( AmberParameters* amberParameters, gint a1Type, gint a2Type, gint a3Type, gint a4Type, gint *n)
{
	gint i;
	gint a1Typet;
	gint a2Typet;
	gint a3Typet;
	gint a4Typet;
	gboolean btype;
	gboolean Ok;
	gint types[4];
	gint k;
	*n = 0;
	/* reversed order of the query quadruplet */
	a1Typet = a4Type;
	a2Typet = a3Type;
	a3Typet = a2Type;
	a4Typet = a1Type;
	/* first pass: exact matches only, no -1 wildcards */
	for(i=0;i<amberParameters->numberOfDihedralTerms;i++)
	{
		types[0] = a1Type;
		types[1] = a2Type;
		types[2] = a3Type;
		types[3] = a4Type;
		Ok = TRUE;
		for(k=0;k<4;k++)
		{
			btype = (types[k] == amberParameters->dihedralAngleTerms[i].numbers[k]);
			if(!btype)
			{
				Ok = FALSE;
				break;
			}
		}
		if(!Ok)
		{
			/* retry with the quadruplet reversed */
			types[0] = a1Typet;
			types[1] = a2Typet;
			types[2] = a3Typet;
			types[3] = a4Typet;
			Ok = TRUE;
			for(k=0;k<4;k++)
			{
				btype = (types[k] == amberParameters->dihedralAngleTerms[i].numbers[k]);
				if(!btype)
				{
					Ok = FALSE;
					break;
				}
			}
		}
		if(Ok)
		{
			*n =i;
			return amberParameters->dihedralAngleTerms[i].nSomme;
		}
	}
	/* second pass: allow -1 wildcard entries in the table */
	for(i=0;i<amberParameters->numberOfDihedralTerms;i++)
	{
		types[0] = a1Type;
		types[1] = a2Type;
		types[2] = a3Type;
		types[3] = a4Type;
		Ok = TRUE;
		for(k=0;k<4;k++)
		{
			btype = (amberParameters->dihedralAngleTerms[i].numbers[k] == -1) || (types[k] == amberParameters->dihedralAngleTerms[i].numbers[k]);
			if(!btype)
			{
				Ok = FALSE;
				break;
			}
		}
		if(!Ok)
		{
			types[0] = a1Typet;
			types[1] = a2Typet;
			types[2] = a3Typet;
			types[3] = a4Typet;
			Ok = TRUE;
			for(k=0;k<4;k++)
			{
				btype = (amberParameters->dihedralAngleTerms[i].numbers[k] == -1) || (types[k] == amberParameters->dihedralAngleTerms[i].numbers[k]);
				if(!btype)
				{
					Ok = FALSE;
					break;
				}
			}
		}
		if(Ok)
		{
			*n =i;
			return amberParameters->dihedralAngleTerms[i].nSomme;
		}
	}
	return 0;
}
/**********************************************************************/
/* TRUE when both types are valid and one of them is a hydrogen type. */
static gboolean canHydrogenBond(AmberParameters* amberParameters, gint a1Type, gint a2Type )
{
	AmberAtomTypes* types = amberParameters->atomTypes;
	if( a1Type>-1 && a2Type>-1)
	if( types[a1Type].name[0] == 'H' || types[a2Type].name[0] == 'H' )
		return TRUE;
	return FALSE;
}
/**********************************************************************/
/* Build the RATTLE constraint list: one distance constraint per bond
 * and, with BONDSANGLESCONSTRAINTS, extra 1-3 distance constraints. */
static void setRattleConstraintsParameters(ForceField* forceField)
{
	gint i;
	gint j;
	gint k;
	gint a1,a2,a3;
	gdouble r2;
	gdouble d;
	Molecule* m = &forceField->molecule;
	gint numberOfRattleConstraintsTerms = 0;
	gdouble* rattleConstraintsTerms[RATTLEDIM];
	forceField->numberOfRattleConstraintsTerms = 0;
	for( i=0; i<RATTLEDIM;i++) forceField->rattleConstraintsTerms[i] = NULL;
	if(m->nAtoms<1) return;
	if(forceField->options.rattleConstraints==NOCONSTRAINTS) return;
	/* upper bound on the number of constraints; trimmed by realloc later */
	numberOfRattleConstraintsTerms = m->numberOf2Connections;
	if(forceField->options.rattleConstraints==BONDSANGLESCONSTRAINTS) numberOfRattleConstraintsTerms += m->numberOf3Connections;
	if(numberOfRattleConstraintsTerms<1) return;
	for( i=0; i<RATTLEDIM;i++) rattleConstraintsTerms[i] = g_malloc(numberOfRattleConstraintsTerms*sizeof(gdouble));
	/* 1=a1, 2=a2, 3=r2a1a2 */
	/* RATTLEDIM 3 */
	j = 0;
	for ( i = 0; i < m->numberOf2Connections; i++)
	{
		/* keep the GUI responsive and allow user interruption */
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul)
break;
		a1 = m->connected2[0][i];
		a2 = m->connected2[1][i];
		/* skip bonds where both atoms are frozen */
		if(!m->atoms[a1].variable &&!m->atoms[a2].variable) continue;
		r2 = 0;
		for (k=0;k<3;k++)
		{
			d = m->atoms[a1].coordinates[k]-m->atoms[a2].coordinates[k];
			r2 +=d*d;
		}
		/* constrain the bond to its current squared length */
		rattleConstraintsTerms[0][j] = a1;
		rattleConstraintsTerms[1][j] = a2;
		rattleConstraintsTerms[2][j] = r2;
		j++;
	}
	if(forceField->options.rattleConstraints==BONDSANGLESCONSTRAINTS)
	{
		gint a1p, a2p;
		gint* nConnections = NULL;
		gint* nAngles = NULL;
		nConnections = g_malloc(m->nAtoms*sizeof(gint));
		nAngles = g_malloc(m->nAtoms*sizeof(gint));
		for ( i = 0; i < m->nAtoms; i++)
		{
			nConnections[i] = 0;
			nAngles[i] = 0;
		}
		/* count connections per atom from the connectivity table */
		for ( i = 0; i < m->nAtoms; i++)
		if(m->atoms[i].typeConnections)
		{
			for ( k = 0; k < m->nAtoms; k++)
				if(i!=k && m->atoms[i].typeConnections[m->atoms[k].N-1]>0) nConnections[i]++;
			/* printf("%d %s nCon=%d\n",i,m->atoms[i].mmType,nConnections[i]);*/
		}
		for ( i = 0; i < m->numberOf3Connections; i++)
		{
			while( gtk_events_pending() ) gtk_main_iteration();
			if(StopCalcul) break;
			a1 = m->connected3[0][i];
			a2 = m->connected3[1][i];
			a3 = m->connected3[2][i];
			if(!m->atoms[a1].variable &&!m->atoms[a3].variable) continue;
			/* cap angle constraints at 2*nConnections-3 per central atom */
			if(nAngles[a2]>=2*nConnections[a2]-3) continue;
			/* skip the 1-3 pair if it is already constrained */
			for (k=0;k<j;k++)
			{
				a1p = (gint)rattleConstraintsTerms[0][k];
				a2p = (gint)rattleConstraintsTerms[1][k];
				if(a1p==a1 && a2p==a3) break;
				if(a1p==a3 && a2p==a1) break;
			}
			if(k!=j) continue;
			nAngles[a2]++;
			r2 = 0;
			for (k=0;k<3;k++)
			{
				d = m->atoms[a1].coordinates[k]-m->atoms[a3].coordinates[k];
				r2 +=d*d;
			}
			rattleConstraintsTerms[0][j] = a1;
			rattleConstraintsTerms[1][j] = a3;
			rattleConstraintsTerms[2][j] = r2;
			j++;
		}
		/*
		for ( i = 0; i < m->nAtoms; i++)
		{
			printf("%d %s nAngle = %d 2*nCon-3=%d\n",i,m->atoms[i].mmType,nAngles[i],2*nConnections[i]-3);
		}
		*/
		if(nConnections) g_free(nConnections);
		if(nAngles) g_free(nAngles);
	}
	if(j<1)
	{
		numberOfRattleConstraintsTerms=0;
		for( i=0; i<RATTLEDIM;i++)
		{
			g_free(rattleConstraintsTerms[i]);
			rattleConstraintsTerms[i] = NULL;
		}
	}
	else
	/* shrink the arrays to the number of constraints actually kept */
	if(numberOfRattleConstraintsTerms!=j)
	{
		numberOfRattleConstraintsTerms=j;
		for( i=0; i<RATTLEDIM;i++)
		{
			rattleConstraintsTerms[i] = g_realloc(rattleConstraintsTerms[i],numberOfRattleConstraintsTerms*sizeof(gdouble));
		}
	}
	forceField->numberOfRattleConstraintsTerms = numberOfRattleConstraintsTerms;
	for( i=0; i<RATTLEDIM;i++) forceField->rattleConstraintsTerms[i] = rattleConstraintsTerms[i];
	printf(_("number Of RattleConstraintsTerms = %d\n"), forceField->numberOfRattleConstraintsTerms);
	printf(_("number free degrees = %d\n"), 3*m->nAtoms-6-forceField->numberOfRattleConstraintsTerms);
	/*
	for ( i = 0; i < forceField->numberOfRattleConstraintsTerms; i++)
	{
		a1 = (gint)rattleConstraintsTerms[0][i];
		a2 = (gint)rattleConstraintsTerms[1][i];
		r2 = rattleConstraintsTerms[2][i];
		printf("%d %d %s %s r2= %f\n", a1,a2, m->atoms[a1].mmType, m->atoms[a2].mmType, r2);
	}
	*/
}
/**********************************************************************/
/* Fill forceField->bondStretchTerms for every bond; unknown type pairs
 * fall back to element-based defaults, ionic dummies get zero force. */
static void setStretchParameters(AmberParameters* amberParameters,ForceField* forceField,gint* atomTypes)
{
	gint i;
	gint a1,a2;
	gint a1Type, a2Type;
	gdouble forceConstant, equilibriumDistance;
	Molecule* m = &forceField->molecule;
	gint numberOfStretchTerms = 0;
	gdouble* bondStretchTerms[STRETCHDIM];
	numberOfStretchTerms = m->numberOf2Connections;
	for( i=0; i<STRETCHDIM;i++) bondStretchTerms[i] = g_malloc(numberOfStretchTerms*sizeof(gdouble));
	/* 1=a1, 2=a2, 3=Force, 4=Re */
	/* STRETCHDIM 4 */
	for ( i = 0; i < numberOfStretchTerms; i++ )
	{
		/* keep the GUI responsive and allow user interruption */
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = m->connected2[0][i];
		a2 = m->connected2[1][i];
		a1Type = atomTypes[a1];
		a2Type = atomTypes[a2];
		if ( !
( getStretchParameters(amberParameters, a1Type, a2Type,&forceConstant,&equilibriumDistance ) ) )
		{
			gchar l1 = m->atoms[a1].mmType[0];
			gchar l2 = m->atoms[a2].mmType[0];
			printf( _("**** couldn't find stretch parameters for %s-%s(%d-%d) "), m->atoms[a1].mmType,m->atoms[a2].mmType,a1Type, a2Type);
			/* generic default, then refine by the element pair */
			forceConstant = 310;
			equilibriumDistance = 1.525;
			if(l1==l2)
			{
				forceConstant = 415;
				equilibriumDistance = 1.5;
			}
			else if((l1=='C' && l2=='H' ) || (l1=='H' && l2=='C' ))
			{
				forceConstant = 340;
				equilibriumDistance = 1.09;
			}
			else if((l1=='C' && l2=='O' ) || (l1=='O' && l2=='C' ))
			{
				forceConstant = 570;
				equilibriumDistance = 1.229;
			}
			else if((l1=='C' && l2=='N' ) || (l1=='N' && l2=='C' ))
			{
				forceConstant = 490;
				equilibriumDistance = 1.335;
			}
			if(isIonic( m->atoms[a1].mmType) || isIonic( m->atoms[a2].mmType))
			{
				forceConstant = 0;
			}
			printf( _("-> I set force to %f and equilibrium distance to %f\n"), forceConstant,equilibriumDistance);
		}
		bondStretchTerms[0][i] = a1;
		bondStretchTerms[1][i] = a2;
		bondStretchTerms[2][i] = forceConstant;
		bondStretchTerms[3][i] = equilibriumDistance;
	}
	forceField->numberOfStretchTerms = numberOfStretchTerms;
	for( i=0; i<STRETCHDIM;i++) forceField->bondStretchTerms[i] = bondStretchTerms[i];
}
/**********************************************************************/
/* Fill forceField->angleBendTerms for every angle; unknown triples fall
 * back to simple defaults, ionic dummies get zero force. */
static void setBendParameters(AmberParameters* amberParameters,ForceField* forceField,gint* atomTypes)
{
	gint i;
	gint a1,a2,a3;
	gint a1Type, a2Type, a3Type;
	Molecule* m = &forceField->molecule;
	gint numberOfBendTerms = 0;
	gdouble* angleBendTerms[BENDDIM];
	gdouble forceConstant, equilibriumAngle;
	numberOfBendTerms = m->numberOf3Connections;
	for( i=0; i<BENDDIM;i++) angleBendTerms[i] = g_malloc(numberOfBendTerms*sizeof(gdouble));
	/* 5 terms 1=a1, 2=a2, 3=a3, 4=Force, 5=angle */
	/* BENDDIM 5 */
	for ( i = 0; i < numberOfBendTerms; i++ )
	{
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = m->connected3[0][i];
		a2 = m->connected3[1][i];
		a3 = m->connected3[2][i];
		a1Type =
atomTypes[a1]; a2Type = atomTypes[a2]; a3Type = atomTypes[a3]; if ( ! ( getBendParameters(amberParameters, a1Type, a2Type, a3Type,&forceConstant,&equilibriumAngle ) ) ) { gchar l1 = m->atoms[a1].mmType[0]; gchar l2 = m->atoms[a2].mmType[0]; gchar l3 = m->atoms[a3].mmType[0]; printf(_("**** couldn't find bend parameters for %s-%s-%s "), m->atoms[a1].mmType,m->atoms[a2].mmType,m->atoms[a3].mmType); forceConstant = 60.0; equilibriumAngle = 115.0; if(!strcmp(m->atoms[a2].mmType,"CT")) { forceConstant = 50.0; equilibriumAngle = 109.0; } else if(l1=='H' || l2=='H' || l3=='H') { forceConstant = 50.0; equilibriumAngle = 120.0; } if(isIonic( m->atoms[a1].mmType) || isIonic( m->atoms[a2].mmType) || isIonic( m->atoms[a3].mmType)) { forceConstant = 0; } printf(_("-> I set force to %f and equilibrium angle to %f\n"), forceConstant, equilibriumAngle); } angleBendTerms[0][i] = a1; angleBendTerms[1][i] = a2; angleBendTerms[2][i] = a3; angleBendTerms[3][i] = forceConstant; angleBendTerms[4][i] = equilibriumAngle; } forceField-> numberOfBendTerms = numberOfBendTerms; for( i=0; i<BENDDIM;i++) forceField->angleBendTerms[i] = angleBendTerms[i]; } /**********************************************************************/ static void setDihedralParameters(AmberParameters* amberParameters,ForceField* forceField,gint* atomTypes) { gint i; gint j; gint k; gint l; gint a1,a2,a3,a4; gint a1Type, a2Type, a3Type,a4Type; Molecule* m = &forceField->molecule; gdouble* dihedralAngleTerms[DIHEDRALDIM]; gint numberOfDihedralTerms = 0; gint dim; /* 8 terms 1=a1, 2=a2, 3=a3, 4=a4, 5=Idiv, 6=Pk, 7=Phase, 8=Pn */ /* DIHEDRALDIM 8 */ for( i=0; i<DIHEDRALDIM;i++) dihedralAngleTerms[i] = g_malloc(4*m->numberOf4Connections*sizeof(gdouble)); numberOfDihedralTerms = 0; for ( i = 0; i < m->numberOf4Connections; i++ ) { while( gtk_events_pending() ) gtk_main_iteration(); if(StopCalcul) break; a1 = m->connected4[0][i]; a2 = m->connected4[1][i]; a3 = m->connected4[2][i]; a4 = m->connected4[3][i]; a1Type = 
atomTypes[a1]; a2Type = atomTypes[a2]; a3Type = atomTypes[a3]; a4Type = atomTypes[a4]; dim = getNumberDihedralParameters(amberParameters, a1Type, a2Type, a3Type, a4Type,&k); if(dim>0) { for(j=0;j<dim;j++) { dihedralAngleTerms[0][numberOfDihedralTerms] = a1; dihedralAngleTerms[1][numberOfDihedralTerms] = a2; dihedralAngleTerms[2][numberOfDihedralTerms] = a3; dihedralAngleTerms[3][numberOfDihedralTerms] = a4; dihedralAngleTerms[4][numberOfDihedralTerms] = amberParameters->dihedralAngleTerms[k].divisor[j]; dihedralAngleTerms[5][numberOfDihedralTerms] = amberParameters->dihedralAngleTerms[k].barrier[j]; dihedralAngleTerms[6][numberOfDihedralTerms] = amberParameters->dihedralAngleTerms[k].phase[j]; dihedralAngleTerms[7][numberOfDihedralTerms] = amberParameters->dihedralAngleTerms[k].n[j]; numberOfDihedralTerms++; if(numberOfDihedralTerms>4*m->numberOf4Connections) { for( l=0; l<DIHEDRALDIM;l++) { dihedralAngleTerms[l] = g_realloc(dihedralAngleTerms[l],numberOfDihedralTerms*sizeof(gdouble)); } } } } } forceField-> numberOfDihedralTerms = numberOfDihedralTerms; for( i=0; i<DIHEDRALDIM;i++) forceField->dihedralAngleTerms[i] = dihedralAngleTerms[i]; } /**********************************************************************/ static void setImproperTorionParameters(AmberParameters* amberParameters, ForceField* forceField,gint* atomTypes) { gint i; gint a1,a2,a3,a4; gint a1Type, a2Type, a3Type,a4Type; Molecule* m = &forceField->molecule; gdouble forceConstant, equilibriumAngle, terms; gint numberOfImproperTorsionTerms = 0; gdouble* improperTorsionTerms[IMPROPERDIHEDRALDIM]; /* 8 terms 1=a1, 2=a2, 3=a3, 4=a4, 5=Idiv, 6=Pk, 7=Phase, 8=Pn */ /* IMPROPERDIHEDRALDIM 8 */ numberOfImproperTorsionTerms = m->numberOf4Connections; for( i=0; i<IMPROPERDIHEDRALDIM;i++) improperTorsionTerms[i] = g_malloc(m->numberOf4Connections*sizeof(gdouble)); for ( i = 0; i < numberOfImproperTorsionTerms; i++ ) { while( gtk_events_pending() ) gtk_main_iteration(); if(StopCalcul) break; a1 = 
m->connected4[0][i];
		a2 = m->connected4[1][i];
		a3 = m->connected4[2][i];
		a4 = m->connected4[3][i];
		a1Type = atomTypes[a1];
		a2Type = atomTypes[a2];
		a3Type = atomTypes[a3];
		a4Type = atomTypes[a4];
		/* getImproperTorsionParameters zeroes the outputs on a miss */
		getImproperTorsionParameters(amberParameters, a1Type, a2Type, a3Type,a4Type, &forceConstant, &equilibriumAngle, &terms );
		improperTorsionTerms[0][i] = a1;
		improperTorsionTerms[1][i] = a2;
		improperTorsionTerms[2][i] = a3;
		improperTorsionTerms[3][i] = a4;
		improperTorsionTerms[4][i] = forceConstant;
		improperTorsionTerms[5][i] = equilibriumAngle;
		improperTorsionTerms[6][i] = terms;
	}
	forceField-> numberOfImproperTorsionTerms = numberOfImproperTorsionTerms;
	for( i=0; i<IMPROPERDIHEDRALDIM;i++) forceField->improperTorsionTerms[i] = improperTorsionTerms[i];
}
/**********************************************************************/
/* Collect hydrogen-bond terms from the non-bonded pair list: every
 * pair that canHydrogenBond() accepts gets its (C,D) coefficients. */
static void setHydrogenBondedParameters(AmberParameters* amberParameters,ForceField* forceField,gint* atomTypes)
{
	gint numberOfHydrogenBonded = 0;
	gint i;
	gint a1,a2;
	gint a1Type,a2Type;
	Molecule* m = &forceField->molecule;
	gdouble C, D;
	gdouble* hydrogenBondedTerms[HYDROGENBONDEDDIM];
	for( i=0; i<HYDROGENBONDEDDIM;i++) hydrogenBondedTerms[i] = g_malloc(m->numberOfNonBonded*sizeof(gdouble));
	for ( i = 0; i < m->numberOfNonBonded; i++ )
	{
		/* keep the GUI responsive and allow user interruption */
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = m->nonBonded[0][i];
		a2 = m->nonBonded[1][i];
		a1Type = atomTypes[a1];
		a2Type = atomTypes[a2];
		/* printf("a1 = %d a2 = %d %s %s\n",a1,a2, amberParameters->atomTypes[a1Type].name, amberParameters->atomTypes[a2Type].name);*/
		if ( canHydrogenBond( amberParameters, a1Type, a2Type ) )
		{
			getHydrogenBondedParameters(amberParameters, a1Type, a2Type, &C, &D );
			hydrogenBondedTerms[0][numberOfHydrogenBonded] = a1;
			hydrogenBondedTerms[1][numberOfHydrogenBonded] = a2;
			hydrogenBondedTerms[2][numberOfHydrogenBonded] = C;
			hydrogenBondedTerms[3][numberOfHydrogenBonded] = D;
			numberOfHydrogenBonded++;
		}
	}
	/* trim (or free) the arrays to the number of terms actually kept */
	if(numberOfHydrogenBonded==0)
	for( i=0;
i<HYDROGENBONDEDDIM;i++)
	{
		g_free(hydrogenBondedTerms[i]);
		hydrogenBondedTerms[i] = NULL;
	}
	else
	for( i=0; i<HYDROGENBONDEDDIM;i++)
	{
		hydrogenBondedTerms[i] = g_realloc(hydrogenBondedTerms[i],numberOfHydrogenBonded*sizeof(gdouble));
	}
	forceField-> numberOfHydrogenBonded = numberOfHydrogenBonded;
	for( i=0; i<HYDROGENBONDEDDIM;i++) forceField->hydrogenBondedTerms[i] = hydrogenBondedTerms[i];
}
/**********************************************************************/
/* Build Aij/Bij terms for the non-bonded pair list plus the 1-4 pairs
 * (the 1-4 pairs get a reduced Coulomb factor of 1/1.2). */
static void setNonBondedParameters(AmberParameters* amberParameters, ForceField* forceField,gint* atomTypes)
{
	gint numberOfNonBonded = 0;
	gint i;
	gint a1,a2,a4;
	gint a1Type,a2Type,a4Type;
	Molecule* m = &forceField->molecule;
	gdouble equilibriumDistance, epsilon;
	gdouble epsilonProduct;
	gdouble ri, rj;
	gdouble Aij, Bij;
	gdouble* nonBondedTerms[NONBONDEDDIM];
	gboolean useHydrogenBonded = forceField->options.hydrogenBonded;
	/* 5 terms 1=a1, 2=a2, 3=Aij, 4=Bij, 5=Coulomb Factor */
	/* NONBONDEDDIM 5 */
	for( i=0; i<NONBONDEDDIM;i++) nonBondedTerms[i] = g_malloc((m->numberOfNonBonded+m->numberOf4Connections)*sizeof(gdouble));
	for ( i = 0; i < m->numberOfNonBonded; i++ )
	{
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = m->nonBonded[0][i];
		a2 = m->nonBonded[1][i];
		/*
		if(a1==a2)
		{
			printf("Erreur non bonded\n");
			break;
		}
		*/
		a1Type = atomTypes[a1];
		a2Type = atomTypes[a2];
		/* pairs handled by the hydrogen-bond term are skipped here */
		if ( !useHydrogenBonded || !canHydrogenBond(amberParameters, a1Type, a2Type ) )
		{
			if ( !
( getNonBondedParameters(amberParameters, a1Type, &equilibriumDistance, &epsilon ) ) ) printf(_("**** couldn't find non bonded parameters for %s \n"),m->atoms[a1].mmType);
			epsilonProduct = sqrt(fabs(epsilon));
			ri = equilibriumDistance;
			/*printf("r1 = %f eps1 = %f\n",equilibriumDistance,epsilon);*/
			getNonBondedParameters(amberParameters, a2Type, &equilibriumDistance, &epsilon );
			/*printf("r2 = %f eps2 = %f\n",equilibriumDistance,epsilon);*/
			epsilonProduct *= sqrt(fabs(epsilon));
			rj = equilibriumDistance;
			/* Bij = (ri+rj)^6, Aij = eps*(ri+rj)^12, Bij *= 2*eps */
			Bij = ( ri + rj ) * ( ri + rj );
			Bij = Bij * Bij * Bij;
			Aij = Bij * Bij * epsilonProduct;
			Bij *= epsilonProduct * 2.0;
			nonBondedTerms[0][numberOfNonBonded] = a1;
			nonBondedTerms[1][numberOfNonBonded] = a2;
			nonBondedTerms[2][numberOfNonBonded] = Aij;
			nonBondedTerms[3][numberOfNonBonded] = Bij;
			nonBondedTerms[4][numberOfNonBonded] = 1.0;
			numberOfNonBonded++;
		}
	}
	/* now 1/2 non bonded */
	for ( i = 0; i < m->numberOf4Connections; i++ )
	{
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = m->connected4[0][i];
		a4 = m->connected4[3][i];
		/*
		if(a1==a4)
		{
			printf("Erreur a1=a4\n");
			break;
		}
		*/
		a1Type = atomTypes[a1];
		a4Type = atomTypes[a4];
		epsilonProduct = 0;
		ri = 0;
		rj = 0;
		if ( getNonBondedParameters(amberParameters, a1Type, &equilibriumDistance, &epsilon ) )
		{
			epsilonProduct = sqrt(fabs(epsilon));
			ri = equilibriumDistance;
			/*printf("r1 = %f eps1 = %f\n",equilibriumDistance,epsilon);*/
		}
		else
		{
			epsilonProduct = 0;
		}
		if ( getNonBondedParameters( amberParameters, a4Type, &equilibriumDistance, &epsilon ) )
		{
			epsilonProduct *= sqrt(fabs(epsilon));
			rj = equilibriumDistance;
			/*printf("r2 = %f eps2 = %f\n",equilibriumDistance,epsilon);*/
		}
		else
		{
			epsilonProduct = 0;
		}
		/* 1-4 van der Waals: Aij halved relative to the regular pairs */
		Bij = ( ri + rj ) * ( ri + rj );
		Bij = Bij * Bij * Bij;
		Aij = Bij * Bij * epsilonProduct / 2.0;
		Bij *= epsilonProduct;
		/*
		Aij = 0;
		Bij = 0;
		*/
		nonBondedTerms[0][numberOfNonBonded] = a1;
		nonBondedTerms[1][numberOfNonBonded] = a4;
		nonBondedTerms[2][numberOfNonBonded] = Aij;
		nonBondedTerms[3][numberOfNonBonded] = Bij;
		/* reduced Coulomb factor for 1-4 pairs */
		nonBondedTerms[4][numberOfNonBonded] = 1.0/(gdouble)1.2;
		numberOfNonBonded++;
	}
	/* trim (or free) the arrays to the number of terms actually kept */
	if(numberOfNonBonded==0)
	for( i=0; i<NONBONDEDDIM;i++)
	{
		g_free(nonBondedTerms[i]);
		nonBondedTerms[i] = NULL;
	}
	else
	for( i=0; i<NONBONDEDDIM;i++) nonBondedTerms[i] = g_realloc(nonBondedTerms[i],numberOfNonBonded*sizeof(gdouble));
	forceField-> numberOfNonBonded = numberOfNonBonded;
	for( i=0; i<NONBONDEDDIM;i++) forceField->nonBondedTerms[i] = nonBondedTerms[i];
}
/**********************************************************************/
/* Build pair-wise interaction terms for every unordered atom pair. */
static void setPairWiseParameters(AmberParameters* amberParameters, ForceField* forceField,gint* atomTypes)
{
	gint numberOfPairWise = 0;
	gint i;
	gint j;
	gint a1,a2;
	gint a1Type,a2Type;
	Molecule* m = &forceField->molecule;
	gdouble a, beta, c6, c8, c10, b;
	gdouble* pairWiseTerms[PAIRWISEDIM];
	numberOfPairWise = m->nAtoms*(m->nAtoms-1)/2;
	/* PAIRWISEDIM 8 */
	for( i=0; i<PAIRWISEDIM;i++) pairWiseTerms[i] = g_malloc((numberOfPairWise)*sizeof(gdouble));
	numberOfPairWise = 0;
	for ( i = 0; i < m->nAtoms; i++ )
	for ( j = i+1; j < m->nAtoms; j++ )
	{
		/* keep the GUI responsive and allow user interruption */
		while( gtk_events_pending() ) gtk_main_iteration();
		if(StopCalcul) break;
		a1 = i;
		a2 = j;
		a1Type = atomTypes[a1];
		a2Type = atomTypes[a2];
		if ( !
( getPairWiseParameters(amberParameters, a1Type,a2Type,&a, &beta,&c6,&c8, &c10,&b) ) ) printf( _("**** couldn't find pair wise parameters for %s-%s\n"), m->atoms[a1].mmType, m->atoms[a2].mmType); pairWiseTerms[0][numberOfPairWise] = a1; pairWiseTerms[1][numberOfPairWise] = a2; pairWiseTerms[2][numberOfPairWise] = a; pairWiseTerms[3][numberOfPairWise] = beta; pairWiseTerms[4][numberOfPairWise] = c6; pairWiseTerms[5][numberOfPairWise] = c8; pairWiseTerms[6][numberOfPairWise] = c10; pairWiseTerms[7][numberOfPairWise] = b; numberOfPairWise++; } if(numberOfPairWise==0) for( i=0; i<PAIRWISEDIM;i++) { g_free(pairWiseTerms[i]); pairWiseTerms[i] = NULL; } else for( i=0; i<PAIRWISEDIM;i++) pairWiseTerms[i] = g_realloc(pairWiseTerms[i],numberOfPairWise*sizeof(gdouble)); forceField-> numberOfPairWise = numberOfPairWise; for( i=0; i<PAIRWISEDIM;i++) forceField->pairWiseTerms[i] = pairWiseTerms[i]; } /**********************************************************************/ static void setAtomTypes(AmberParameters* amberParameters,ForceField* forceField,gint* atomTypes) { Molecule* m = &forceField->molecule; gint nAtoms = m->nAtoms; gint i; for(i=0;i<nAtoms;i++) { /* printf("Atom %s=",m->atoms[i].mmType); */ atomTypes[i] = getNumberType(amberParameters, m->atoms[i].mmType); /* { gint j; gint nTypes = amberParameters->numberOfTypes; AmberAtomTypes* types = amberParameters->atomTypes; gchar* type = m->atoms[i].mmType; gint len = strlen(type); if(strcmp(type,"X")==0) printf("-1\n"); for(j=0;j<nTypes;j++) { if(len == (gint)strlen(types[j].name) && strstr(types[j].name,type)) printf(" %d \n",types[j].number); } } */ } } /**********************************************************************/ static void setAmberParameters(ForceField* forceField) { Molecule* m = &forceField->molecule; gint* atomTypes = g_malloc(m->nAtoms*sizeof(gint)); AmberParameters amberParameters; if(staticAmberParameters && staticAmberParameters->numberOfTypes >0 ) amberParameters = *staticAmberParameters; else { 
gchar* persoFileName = g_strdup_printf("%s%sPersonalMM.prm",gabedit_directory(), G_DIR_SEPARATOR_S); gchar* defaultFileName = g_strdup_printf("%s%sMolecularMechanics.prm",gabedit_directory(), G_DIR_SEPARATOR_S); amberParameters = newAmberParameters(); if(!readAmberParameters(&amberParameters,persoFileName)) if(!readAmberParameters(&amberParameters,persoFileName)) { g_free(persoFileName); g_free(defaultFileName); return; } staticAmberParameters = g_malloc(sizeof(AmberParameters)); *staticAmberParameters = amberParameters; g_free(persoFileName); g_free(defaultFileName); } setAtomTypes(&amberParameters,forceField,atomTypes); while( gtk_events_pending() ) gtk_main_iteration(); if(StopCalcul) { return; } while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.bondStretch) setStretchParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.angleBend) setBendParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.dihedralAngle) setDihedralParameters(&amberParameters, forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.improperTorsion) setImproperTorionParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.hydrogenBonded) setHydrogenBondedParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.nonBonded) setNonBondedParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.rattleConstraints!=NOCONSTRAINTS) setRattleConstraintsParameters(forceField); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); /* 
freeAmberParameters(&amberParameters); */ } /**********************************************************************/ static void setAllPairWiseParameters(ForceField* forceField) { Molecule* m = &forceField->molecule; gint* atomTypes = g_malloc(m->nAtoms*sizeof(gint)); AmberParameters amberParameters; if(staticAmberParameters && staticAmberParameters->numberOfTypes >0 ) amberParameters = *staticAmberParameters; else { gchar* persoFileName = g_strdup_printf("%s%sPersonalMM.prm",gabedit_directory(), G_DIR_SEPARATOR_S); gchar* defaultFileName = g_strdup_printf("%s%sMolecularMechanics.prm",gabedit_directory(), G_DIR_SEPARATOR_S); amberParameters = newAmberParameters(); if(!readAmberParameters(&amberParameters,persoFileName)) if(!readAmberParameters(&amberParameters,persoFileName)) { g_free(persoFileName); g_free(defaultFileName); return; } staticAmberParameters = g_malloc(sizeof(AmberParameters)); *staticAmberParameters = amberParameters; g_free(persoFileName); g_free(defaultFileName); } setAtomTypes(&amberParameters,forceField,atomTypes); while( gtk_events_pending() ) gtk_main_iteration(); if(StopCalcul) { return; } while( gtk_events_pending() ) gtk_main_iteration(); setPairWiseParameters(&amberParameters,forceField,atomTypes); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); if(forceField->options.rattleConstraints!=NOCONSTRAINTS) setRattleConstraintsParameters(forceField); if(StopCalcul) return; while( gtk_events_pending() ) gtk_main_iteration(); /* freeAmberParameters(&amberParameters); */ } /**********************************************************************/ static void calculateGradientBondAmber(ForceField* forceField) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rijx, rijy, rijz, forceConstant, equilibriumDistance, term; gdouble forceix, forceiy, forceiz; gdouble bondLength; Molecule* m = &forceField->molecule; gdouble* bondStretchTerms[STRETCHDIM]; gint numberOfStretchTerms = forceField->numberOfStretchTerms; for( i=0; 
i<STRETCHDIM;i++) bondStretchTerms[i] = forceField->bondStretchTerms[i];
#ifdef ENABLE_OMP
#pragma omp parallel for private(i,ai,aj,forceConstant, equilibriumDistance,atomi,atomj,rijx,rijy,rijz,bondLength,term,forceix,forceiy,forceiz)
#endif
	for ( i = 0; i < numberOfStretchTerms; i++ )
	{
		ai = (gint)bondStretchTerms[0][i];
		aj = (gint)bondStretchTerms[1][i];
		forceConstant = bondStretchTerms[2][i];
		equilibriumDistance = bondStretchTerms[3][i];
		atomi = m->atoms[ai];
		atomj = m->atoms[aj];
		rijx = atomi.coordinates[0] - atomj.coordinates[0];
		rijy = atomi.coordinates[1] - atomj.coordinates[1];
		rijz = atomi.coordinates[2] - atomj.coordinates[2];
		bondLength = sqrt( rijx * rijx + rijy * rijy + rijz * rijz );
		/* clamp to avoid division by zero for coincident atoms */
		if ( bondLength < 1.0e-10 ) bondLength = 1.0e-10;
		/* -dE/dr of k(r - re)^2, projected along the bond vector */
		term = - 2*forceConstant * ( bondLength - equilibriumDistance ) / bondLength;
		forceix = term * rijx;
		forceiy = term * rijy;
		forceiz = term * rijz;
#ifdef ENABLE_OMP
#pragma omp critical
#endif
		{
			m->gradient[0][ai] -= forceix;
			m->gradient[1][ai] -= forceiy;
			m->gradient[2][ai] -= forceiz;
			m->gradient[0][aj] += forceix;
			m->gradient[1][aj] += forceiy;
			m->gradient[2][aj] += forceiz;
		}
	}
}
/**********************************************************************/
/* Accumulate angle-bend gradient contributions into m->gradient. */
static void calculateGradientBendAmber(ForceField* forceField)
{
	gint i;
	Molecule* m = &forceField->molecule;
	gdouble* angleBendTerms[BENDDIM];
	static gdouble D2R = 1.0/57.29577951308232090712; /* degrees -> radians */
	gint numberOfBendTerms = forceField->numberOfBendTerms;
	for( i=0; i<BENDDIM;i++) angleBendTerms[i] = forceField->angleBendTerms[i];
#ifdef ENABLE_OMP
#pragma omp parallel for private(i)
#endif
	for ( i = 0; i < numberOfBendTerms; i++ )
	{
		gint ai, aj, ak;
		AtomMol atomi,atomj,atomk;
		gdouble term;
		gdouble thetaDeg, thetaRad, cosTheta;
		gdouble denominator, absTheta;
		gdouble delta = 1e-10;
		gdouble rijx, rijy, rijz;
		gdouble rkjx, rkjy, rkjz;
		gdouble rij2, rij, rkj2, rkj,rij3, rkj3;
		gdouble denominatori, denominatork;
		gdouble forceix, forceiy, forceiz;
		gdouble forcejx, forcejy, forcejz;
		gdouble forcekx, forceky, forcekz;
		gdouble rijDotrkj;
		gdouble term2ix, term2iy, term2iz;
		gdouble term2jx, term2jy, term2jz;
		gdouble term2kx, term2ky, term2kz;
		ai = (gint)angleBendTerms[0][i];
		aj = (gint)angleBendTerms[1][i];
		ak = (gint)angleBendTerms[2][i];
		atomi = m->atoms[ai];
		atomj = m->atoms[aj];
		atomk = m->atoms[ak];
		thetaDeg = getAngle(&atomi, &atomj, &atomk);
		thetaRad = thetaDeg * D2R;
		absTheta = fabs( thetaDeg );
		cosTheta = cos( thetaRad );
		/* skip near-zero / near-linear angles: the gradient diverges there */
		if ( ( absTheta > delta ) && ( absTheta < 180.0 - delta ) )
		{
			/*denominator = sqrt( 1 - cosTheta * cosTheta );*/
			denominator = sin(thetaRad);
			if ( denominator < 1.0e-10 )
			{
				printf("cut denominator\n");
				denominator = 1.0e-10;
			}
			/* dE/dtheta of k(theta - theta0)^2, over sin(theta) */
			term = 2*angleBendTerms[3][i] * (thetaDeg - angleBendTerms[4][i]) / denominator;
			term *= D2R;
			rijx = atomi.coordinates[0] - atomj.coordinates[0];
			rijy = atomi.coordinates[1] - atomj.coordinates[1];
			rijz = atomi.coordinates[2] - atomj.coordinates[2];
			rkjx = atomk.coordinates[0] - atomj.coordinates[0];
			rkjy = atomk.coordinates[1] - atomj.coordinates[1];
			rkjz = atomk.coordinates[2] - atomj.coordinates[2];
			rij2 = rijx * rijx + rijy * rijy + rijz * rijz;
			rij = sqrt( rij2 );
			rkj2 = rkjx * rkjx + rkjy * rkjy + rkjz * rkjz;
			rkj = sqrt( rkj2 );
			rijDotrkj = rijx * rkjx + rijy * rkjy + rijz * rkjz;
			rij3 = rij2 * rij;
			rkj3 = rkj2 * rkj;
			denominatori = rij3 * rkj;
			if ( denominatori < 1.0e-10 )
			{
				printf("cut denominatori\n");
				denominatori = 1.0e-10;
			}
			denominatork = rij * rkj3;
			if ( denominatork < 1.0e-10 )
			{
				printf("cut denominatork\n");
				denominatork = 1.0e-10;
			}
			/* derivative of cos(theta) w.r.t. atoms i and k */
			term2ix = ( rij2 * rkjx - rijDotrkj * rijx ) / denominatori;
			term2iy = ( rij2 * rkjy - rijDotrkj * rijy ) / denominatori;
			term2iz = ( rij2 * rkjz - rijDotrkj * rijz ) / denominatori;
			term2kx = ( rkj2 * rijx - rijDotrkj * rkjx ) / denominatork;
			term2ky = ( rkj2 * rijy - rijDotrkj * rkjy ) / denominatork;
			term2kz = ( rkj2 * rijz - rijDotrkj * rkjz ) / denominatork;
			/* central-atom term balances the other two */
			term2jx = - term2ix - term2kx;
			term2jy = - term2iy - term2ky;
			term2jz = - term2iz -
term2kz;
			forceix = term * term2ix;
			forceiy = term * term2iy;
			forceiz = term * term2iz;
			forcejx = term * term2jx;
			forcejy = term * term2jy;
			forcejz = term * term2jz;
			forcekx = term * term2kx;
			forceky = term * term2ky;
			forcekz = term * term2kz;
#ifdef ENABLE_OMP
#pragma omp critical
#endif
			{
				m->gradient[0][ai] -= forceix;
				m->gradient[1][ai] -= forceiy;
				m->gradient[2][ai] -= forceiz;
				m->gradient[0][aj] -= forcejx;
				m->gradient[1][aj] -= forcejy;
				m->gradient[2][aj] -= forcejz;
				m->gradient[0][ak] -= forcekx;
				m->gradient[1][ak] -= forceky;
				m->gradient[2][ak] -= forcekz;
			}
		}
	}
}
/**********************************************************************/
/* Accumulate dihedral-torsion gradient contributions into m->gradient. */
static void calculateGradientDihedralAmber(ForceField* forceField)
{
	gint i;
	Molecule* m = &forceField->molecule;
	gdouble* dihedralAngleTerms[DIHEDRALDIM];
	static gdouble D2R = 1.0/57.29577951308232090712; /* degrees -> radians */
	gint numberOfDihedralTerms = forceField->numberOfDihedralTerms;
	for(i=0;i<DIHEDRALDIM;i++) dihedralAngleTerms[i] = forceField->dihedralAngleTerms[i];
#ifdef ENABLE_OMP
#pragma omp parallel for private(i)
#endif
	for ( i = 0; i < numberOfDihedralTerms; i++ )
	{
		gint ai, aj, ak, al;
		AtomMol atomi,atomj,atomk,atoml;
		gint j;
		gdouble rjix, rjiy, rjiz;
		gdouble rkjx, rkjy, rkjz;
		gdouble rkix, rkiy, rkiz;
		gdouble rljx, rljy, rljz;
		gdouble rlkx, rlky, rlkz;
		gdouble forceix, forceiy, forceiz;
		gdouble forcejx, forcejy, forcejz;
		gdouble forcekx, forceky, forcekz;
		gdouble forcelx, forcely, forcelz;
		gdouble rkj;
		gdouble xt, yt, zt;
		gdouble xu, yu, zu;
		gdouble xtu, ytu, ztu;
		gdouble rt2, ru2, rtru;
		gdouble cosine1, sine1, cosineN, sineN, cosold, sinold;
		gdouble cosPhase, sinPhase;
		gdouble dedxt, dedyt, dedzt;
		gdouble dedxu, dedyu, dedzu;
		gdouble dedphi;
		gint n;
		gdouble vn;
		ai = (gint)dihedralAngleTerms[0][i];
		aj = (gint)dihedralAngleTerms[1][i];
		ak = (gint)dihedralAngleTerms[2][i];
		al = (gint)dihedralAngleTerms[3][i];
		atomi = m->atoms[ai];
		atomj = m->atoms[aj];
		atomk = m->atoms[ak];
		atoml = m->atoms[al];
		rjix = atomj.coordinates[0] -
atomi.coordinates[0]; rjiy = atomj.coordinates[1] - atomi.coordinates[1]; rjiz = atomj.coordinates[2] - atomi.coordinates[2]; rkjx = atomk.coordinates[0] - atomj.coordinates[0]; rkjy = atomk.coordinates[1] - atomj.coordinates[1]; rkjz = atomk.coordinates[2] - atomj.coordinates[2]; rlkx = atoml.coordinates[0] - atomk.coordinates[0]; rlky = atoml.coordinates[1] - atomk.coordinates[1]; rlkz = atoml.coordinates[2] - atomk.coordinates[2]; xt = rjiy*rkjz - rkjy*rjiz; yt = rjiz*rkjx - rkjz*rjix; zt = rjix*rkjy - rkjx*rjiy; xu = rkjy*rlkz - rlky*rkjz; yu = rkjz*rlkx - rlkz*rkjx; zu = rkjx*rlky - rlkx*rkjy; xtu = yt*zu - yu*zt; ytu = zt*xu - zu*xt; ztu = xt*yu - xu*yt; rt2 = xt*xt + yt*yt + zt*zt; ru2 = xu*xu + yu*yu + zu*zu; rtru = sqrt(rt2 * ru2); rkj = sqrt(rkjx*rkjx + rkjy*rkjy + rkjz*rkjz); cosine1 = 1.0; sine1 = 0.0; if (rtru <1e-10) rtru = 1e-10; if (rt2 <1e-10) rt2 = 1e-10; if (ru2 <1e-10) ru2 = 1e-10; cosine1 = (xt*xu + yt*yu + zt*zu) / rtru; sine1 = (rkjx*xtu + rkjy*ytu + rkjz*ztu) / (rkj*rtru); n = (gint)dihedralAngleTerms[7][i]; cosPhase = cos(D2R*dihedralAngleTerms[6][i]); sinPhase = sin(D2R*dihedralAngleTerms[6][i]); vn = dihedralAngleTerms[5][i]/dihedralAngleTerms[4][i]; /* compute the multiple angle trigonometry and the phase terms */ cosineN = cosine1; sineN = sine1; for(j=2;j<=n;j++) { cosold = cosineN; sinold = sineN; cosineN = cosine1*cosold - sine1*sinold; sineN = cosine1*sinold + sine1*cosold; } dedphi = vn*n*(cosineN*sinPhase-sineN*cosPhase); /* chain rule terms for first derivative components */ rkix = atomk.coordinates[0] - atomi.coordinates[0]; rkiy = atomk.coordinates[1] - atomi.coordinates[1]; rkiz = atomk.coordinates[2] - atomi.coordinates[2]; rljx = atoml.coordinates[0] - atomj.coordinates[0]; rljy = atoml.coordinates[1] - atomj.coordinates[1]; rljz = atoml.coordinates[2] - atomj.coordinates[2]; dedxt = dedphi * (yt*rkjz - rkjy*zt) / (rt2*rkj); dedyt = dedphi * (zt*rkjx - rkjz*xt) / (rt2*rkj); dedzt = dedphi * (xt*rkjy - rkjx*yt) / (rt2*rkj); 
dedxu = -dedphi * (yu*rkjz - rkjy*zu) / (ru2*rkj); dedyu = -dedphi * (zu*rkjx - rkjz*xu) / (ru2*rkj); dedzu = -dedphi * (xu*rkjy - rkjx*yu) / (ru2*rkj); /* compute first derivative components for this angle */ forceix = rkjz*dedyt - rkjy*dedzt; forceiy = rkjx*dedzt - rkjz*dedxt; forceiz = rkjy*dedxt - rkjx*dedyt; forcejx = rkiy*dedzt - rkiz*dedyt + rlkz*dedyu - rlky*dedzu; forcejy = rkiz*dedxt - rkix*dedzt + rlkx*dedzu - rlkz*dedxu; forcejz = rkix*dedyt - rkiy*dedxt + rlky*dedxu - rlkx*dedyu; forcekx = rjiz*dedyt - rjiy*dedzt + rljy*dedzu - rljz*dedyu; forceky = rjix*dedzt - rjiz*dedxt + rljz*dedxu - rljx*dedzu; forcekz = rjiy*dedxt - rjix*dedyt + rljx*dedyu - rljy*dedxu; forcelx = rkjz*dedyu - rkjy*dedzu; forcely = rkjx*dedzu - rkjz*dedxu; forcelz = rkjy*dedxu - rkjx*dedyu; #ifdef ENABLE_OMP #pragma omp critical #endif { m->gradient[0][ai] += forceix; m->gradient[1][ai] += forceiy; m->gradient[2][ai] += forceiz; m->gradient[0][aj] += forcejx; m->gradient[1][aj] += forcejy; m->gradient[2][aj] += forcejz; m->gradient[0][ak] += forcekx; m->gradient[1][ak] += forceky; m->gradient[2][ak] += forcekz; m->gradient[0][al] += forcelx; m->gradient[1][al] += forcely; m->gradient[2][al] += forcelz; } } } /**********************************************************************/ static void calculateGradientImproperTorsion(ForceField* forceField) { } /**********************************************************************/ static void calculateGradientNonBondedAmber(ForceField* forceField) { gint i; gboolean useCoulomb = forceField->options.coulomb; Molecule* m = &forceField->molecule; gdouble* nonBondedTerms[NONBONDEDDIM]; gint numberOfNonBonded = forceField->numberOfNonBonded; for(i=0;i<NONBONDEDDIM;i++) nonBondedTerms[i] = forceField->nonBondedTerms[i]; /* non-bonded part */ #ifdef ENABLE_OMP #pragma omp parallel for private(i) #endif for ( i = 0; i < numberOfNonBonded; i++ ) { gint ai, aj; AtomMol atomi,atomj; gdouble rijx, rijy, rijz; gdouble forceix, forceiy, forceiz; 
gdouble forcejx, forcejy, forcejz; gdouble permittivityScale = 1, permittivity = 1; gdouble coulombFactor, factorNonBonded; gdouble rij2, rij; gdouble rij3; gdouble chargei, chargej, coulombTerm; gdouble Aij, Bij, rij6, rij7, rij14, rij8; gdouble term1, term2, term3; coulombFactor = 332.05382 / ( permittivity * permittivityScale ); ai = (gint)nonBondedTerms[0][i]; aj = (gint)nonBondedTerms[1][i]; Aij = nonBondedTerms[2][i]; Bij = nonBondedTerms[3][i]; factorNonBonded = nonBondedTerms[4][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; chargei = atomi.charge; chargej = atomj.charge; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; if ( rij2 < 1.0e-2 ) rij2 = 1.0e-2; rij = sqrt( rij2 ); rij3 = rij2 * rij; rij6 = rij3 * rij3; rij7 = rij6 * rij; rij8 = rij7 * rij; rij14 = rij7 * rij7; if(useCoulomb) coulombTerm = ( chargei * chargej * coulombFactor*factorNonBonded ) / rij3; else coulombTerm = 0.0; /*printf("coulombTerm = %f\n",coulombTerm);*/ term1 = 12 * Aij / rij14; term2 = 6 * Bij / rij8; term3 = term1 - term2 + coulombTerm; forceix = term3 * rijx; forceiy = term3 * rijy; forceiz = term3 * rijz; forcejx = - forceix; forcejy = - forceiy; forcejz = - forceiz; #ifdef ENABLE_OMP #pragma omp critical #endif { m->gradient[0][ai] -= forceix; m->gradient[1][ai] -= forceiy; m->gradient[2][ai] -= forceiz; m->gradient[0][aj] -= forcejx; m->gradient[1][aj] -= forcejy; m->gradient[2][aj] -= forcejz; } } } /*********************************************************************/ static void calculateGradientHydrogenBondedAmber(ForceField* forceField) { gint i; Molecule* m = &forceField->molecule; gdouble* hydrogenBondedTerms[HYDROGENBONDEDDIM]; gint numberOfHydrogenBonded = forceField->numberOfHydrogenBonded; for(i=0;i<HYDROGENBONDEDDIM;i++) hydrogenBondedTerms[i] = forceField->hydrogenBondedTerms[i]; /* Hydrogen-bonded part */ 
#ifdef ENABLE_OMP #pragma omp parallel for private(i) #endif for ( i = 0; i < numberOfHydrogenBonded; i++ ) { gint ai, aj; AtomMol atomi,atomj; gdouble rijx, rijy, rijz; gdouble forceix, forceiy, forceiz; gdouble forcejx, forcejy, forcejz; gdouble Cij, Dij, rij2, rij4, rij8, rij12, rij14; gdouble term1, term2, term3; ai = (gint)hydrogenBondedTerms[0][i]; aj = (gint)hydrogenBondedTerms[1][i]; Cij = hydrogenBondedTerms[2][i]; Dij = hydrogenBondedTerms[3][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; if ( rij2 < 1.0e-2 ) rij2 = 1.0e-2; rij4 = rij2 * rij2; rij8 = rij4 * rij4; rij12 = rij8 * rij4; rij14 = rij12 * rij2; term1 = Cij / rij14; term2 = Dij / rij12; term3 = term1 - term2; forceix = term3 * rijx; forceiy = term3 * rijy; forceiz = term3 * rijz; forcejx = - forceix; forcejy = - forceiy; forcejz = - forceiz; #ifdef ENABLE_OMP #pragma omp critical #endif { m->gradient[0][ai] -= forceix; m->gradient[1][ai] -= forceiy; m->gradient[2][ai] -= forceiz; m->gradient[0][aj] -= forcejx; m->gradient[1][aj] -= forcejy; m->gradient[2][aj] -= forcejz; } } } /**********************************************************************/ static void calculateGradientPairWise(ForceField* forceField) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rijx, rijy, rijz; gdouble forceix, forceiy, forceiz; gdouble forcejx, forcejy, forcejz; gdouble permittivityScale = 1, permittivity = 1; gdouble coulombFactor; gdouble rij2, rij; gdouble rij3; gdouble chargei, chargej, coulombTerm; gdouble rij6, rij7, rij8, rij9, rij10, rij11, rij12; gdouble term1, term6, term8, term10, termAll; gdouble A, Beta, C6, C8, C10,b; gdouble s, sp, fact, br, brk, ebr; gint n, k; gboolean useCoulomb = forceField->options.coulomb; gboolean useVanderWals = forceField->options.vanderWals; Molecule* m = 
&forceField->molecule; gdouble* pairWiseTerms[PAIRWISEDIM]; gint numberOfPairWise = forceField->numberOfPairWise; for(i=0;i<PAIRWISEDIM;i++) pairWiseTerms[i] = forceField->pairWiseTerms[i]; /* non-bonded part */ coulombFactor = 332.05382 / ( permittivity * permittivityScale ); for ( i = 0; i < numberOfPairWise; i++ ) { ai = (gint)pairWiseTerms[0][i]; aj = (gint)pairWiseTerms[1][i]; A = pairWiseTerms[2][i]; Beta = pairWiseTerms[3][i]; C6 = pairWiseTerms[4][i]; C8 = pairWiseTerms[5][i]; C10 = pairWiseTerms[6][i]; b = pairWiseTerms[7][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; chargei = atomi.charge; chargej = atomj.charge; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; if ( rij2 < 1.0e-8 ) rij2 = 1.0e-8; rij = sqrt( rij2 ); rij3 = rij2 * rij; rij6 = rij3 * rij3; rij7 = rij6 * rij; rij8 = rij7 * rij; rij9 = rij8 * rij; rij10 = rij9 * rij; rij11 = rij10 * rij; rij12 = rij11 * rij; if(useCoulomb) coulombTerm = ( chargei * chargej * coulombFactor ) / rij3; else coulombTerm = 0.0; /* printf("A = %f Beta = %f qi = %f qj = %f rij = %f\n",A,Beta,chargei,chargej,rij);*/ /*term1 = -A*Beta/rij*exp(-Beta*rij);*/ term1 = A*Beta/rij*exp(-Beta*rij); br = b*rij; ebr = exp(-b*rij); term6 = 0.0; if(useVanderWals && fabs(C6)>1e-12) { fact = 1.0; s = 1.0; n = 3; brk = 1.0; for(k=1;k<2*n;k++) { fact *= k; brk *= br; s += brk/fact; } sp = s*b; fact *=2*n; brk *= br; s += brk/fact; term6 = b*C6*ebr*s/rij7 -(2*n)*C6*(1-ebr*s)/rij8 -C6*ebr/rij7*sp; } term8 = 0.0; if(useVanderWals && fabs(C8)>1e-12) { fact = 1.0; s = 1.0; n = 4; brk = 1.0; for(k=1;k<2*n;k++) { fact *= k; brk *= br; s += brk/fact; } sp = s*b; fact *=2*n; brk *= br; s += brk/fact; term8 = b*C8*ebr*s/rij9 -(2*n)*C8*(1-ebr*s)/rij10 -C8*ebr/rij9*sp; } term10 = 0.0; if(useVanderWals && fabs(C10)>1e-12) { fact = 1.0; s = 1.0; n = 5; brk = 1.0; for(k=1;k<2*n;k++) { fact *= 
k; brk *= br; s += brk/fact; } sp = s*b; fact *=2*n; brk *= br; s += brk/fact; term10 = b*C10*ebr*s/rij11 -(2*n)*C10*(1-ebr*s)/rij12 -C10*ebr/rij11*sp; } //termAll = term1 - term6 - term8 - term10 + coulombTerm; termAll = term1 + term6 + term8 + term10 + coulombTerm; forceix = termAll * rijx; forceiy = termAll * rijy; forceiz = termAll * rijz; forcejx = - forceix; forcejy = - forceiy; forcejz = - forceiz; { m->gradient[0][ai] -= forceix; m->gradient[1][ai] -= forceiy; m->gradient[2][ai] -= forceiz; m->gradient[0][aj] -= forcejx; m->gradient[1][aj] -= forcejy; m->gradient[2][aj] -= forcejz; } } } /**********************************************************************/ static void calculateGradientAmber(ForceField* forceField) { gint i; gint j; Molecule* m = &forceField->molecule; for(j=0;j<3;j++) for( i=0; i<m->nAtoms;i++) m->gradient[j][i] = 0.0; calculateGradientBondAmber(forceField); if(StopCalcul) return; calculateGradientBendAmber(forceField); if(StopCalcul) return; calculateGradientDihedralAmber(forceField); if(StopCalcul) return; calculateGradientImproperTorsion(forceField); if(StopCalcul) return; calculateGradientNonBondedAmber(forceField); if(StopCalcul) return; calculateGradientHydrogenBondedAmber(forceField); /* printf("Before grad pairwise\n"); for( i=0; i<m->nAtoms;i++) for(j=0;j<3;j++) printf(" i = %d j = %d g = %f\n",i,j,m->gradient[j][i]); */ if(StopCalcul) return; calculateGradientPairWise(forceField); if(StopCalcul) return; /* printf("After grad pairwise\n"); for( i=0; i<m->nAtoms;i++) for(j=0;j<3;j++) printf(" i = %d j = %d g = %f\n",i,j,m->gradient[j][i]); */ for( i=0; i<m->nAtoms;i++) { if(!m->atoms[i].variable) for(j=0;j<3;j++) m->gradient[j][i] = 0.0; } } /**********************************************************************/ /* static void calculateGradientNumericAmber(ForceField* forceField) { gint i; gint j; Molecule* m = &forceField->molecule; gdouble h=0.0001; gdouble E1; gdouble E2; for(j=0;j<3;j++) for( i=0; i<m->nAtoms;i++) { while( 
gtk_events_pending() ) gtk_main_iteration(); if(StopCalcul) return; m->atoms[i].coordinates[j] += h; E1 = calculateEnergyTmpAmber(forceField,&m); m->atoms[i].coordinates[j] -= h+h; E2 = calculateEnergyTmpAmber(forceField,&m); m->atoms[i].coordinates[j] += h; m->gradient[j][i] = (E1-E2)/2/h; } } */ /**********************************************************************/ static gdouble calculateEnergyBondAmber(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rijx, rijy, rijz, forceConstant, equilibriumDistance, term; Molecule* m = molecule; gdouble* bondStretchTerms[STRETCHDIM]; gint numberOfStretchTerms = forceField->numberOfStretchTerms; gdouble energy = 0.0; gdouble bondLength; for( i=0; i<STRETCHDIM;i++) bondStretchTerms[i] = forceField->bondStretchTerms[i]; #ifdef ENABLE_OMP #pragma omp parallel for private(i,ai,aj,forceConstant, equilibriumDistance,atomi,atomj,rijx,rijy,rijz,bondLength,term) reduction(+:energy) #endif for ( i = 0; i < numberOfStretchTerms; i++ ) { ai = (gint)bondStretchTerms[0][i]; aj = (gint)bondStretchTerms[1][i]; forceConstant = bondStretchTerms[2][i]; equilibriumDistance = bondStretchTerms[3][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; bondLength = sqrt( rijx * rijx + rijy * rijy + rijz * rijz ); term = bondLength - equilibriumDistance; energy += ( forceConstant ) * term * term; } return energy; } /**********************************************************************/ static gdouble calculateEnergyBendAmber(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj, ak; AtomMol atomi,atomj, atomk; gdouble thetaDeg; gdouble term; gdouble energy = 0.0; static gdouble D2RxD2R = 1/( RAD_TO_DEG*RAD_TO_DEG); Molecule* m = molecule; gdouble* angleBendTerms[BENDDIM]; gint numberOfBendTerms = forceField->numberOfBendTerms; for( i=0; 
i<BENDDIM;i++) angleBendTerms[i] = forceField->angleBendTerms[i]; #ifdef ENABLE_OMP #pragma omp parallel for private(i,ai,aj,ak,atomi,atomj,atomk,thetaDeg,term) reduction(+:energy) #endif for ( i = 0; i < numberOfBendTerms; i++ ) { ai = (gint)angleBendTerms[0][i]; aj = (gint)angleBendTerms[1][i]; ak = (gint)angleBendTerms[2][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; atomk = m->atoms[ak]; thetaDeg = getAngle(&atomi, &atomj, &atomk); term = thetaDeg - angleBendTerms[4][i]; term *= term * angleBendTerms[3][i]; term *= D2RxD2R; energy += term; /* printf("f =%f t0 = %f t= %f e= %f\n", angleBendTerms[3][i], angleBendTerms[4][i], thetaDeg, energy); */ } return energy; } /**********************************************************************/ static gdouble calculateEnergyDihedralAmber(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj, ak, al; AtomMol atomi,atomj, atomk, atoml; gdouble phiDeg; Molecule* m = molecule; gdouble* dihedralAngleTerms[DIHEDRALDIM]; gint numberOfDihedralTerms = forceField->numberOfDihedralTerms; gdouble energy = 0.0; static gdouble D2R = 1.0/57.29577951308232090712; for(i=0;i<DIHEDRALDIM;i++) dihedralAngleTerms[i] = forceField->dihedralAngleTerms[i]; #ifdef ENABLE_OMP #pragma omp parallel for private(i,ai,aj,ak,al,atomi,atomj,atomk,atoml,phiDeg) reduction(+:energy) #endif for ( i = 0; i < numberOfDihedralTerms; i++ ) { ai = (gint)dihedralAngleTerms[0][i]; aj = (gint)dihedralAngleTerms[1][i]; ak = (gint)dihedralAngleTerms[2][i]; al = (gint)dihedralAngleTerms[3][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; atomk = m->atoms[ak]; atoml = m->atoms[al]; phiDeg = getTorsion( &atomi ,&atomj, &atomk, &atoml); energy += dihedralAngleTerms[5][i]/dihedralAngleTerms[4][i] * ( 1.0 + cos( D2R*(dihedralAngleTerms[7][i] * phiDeg - dihedralAngleTerms[6][i] )) ); } return energy; } /**********************************************************************/ static gdouble calculateEnergyImproperTorsionAmber(ForceField* forceField,Molecule* 
molecule) { gdouble energy = 0.0; return energy; } /**********************************************************************/ static gdouble calculateEnergyfNonBondedAmber(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rij2, rij6, rij12, coulombTerm, factorNonBonded; gdouble rijx, rijy, rijz; gdouble chargei, chargej, Aij, Bij, rij; gdouble permittivityScale = 1, permittivity = 1; gdouble coulombFactor; Molecule* m = molecule; gdouble* nonBondedTerms[NONBONDEDDIM]; gint numberOfNonBonded = forceField->numberOfNonBonded; gboolean useCoulomb = forceField->options.coulomb; gdouble energy = 0.0; for(i=0;i<NONBONDEDDIM;i++) nonBondedTerms[i] = forceField->nonBondedTerms[i]; /* now for non-bonded term */ coulombFactor = 332.05382 / ( permittivity * permittivityScale ); /*printf("number of Non Bonded terms = %d\n",numberOfNonBonded);*/ #ifdef ENABLE_OMP #pragma omp parallel for private(i,ai,aj,Aij,Bij,factorNonBonded,atomi,atomj,chargei,chargej,rijx,rijy,rijz,rij2,rij,rij6,rij12,coulombTerm) reduction(+:energy) #endif for ( i = 0; i < numberOfNonBonded; i++ ) { ai = (gint)nonBondedTerms[0][i]; aj = (gint)nonBondedTerms[1][i]; Aij = nonBondedTerms[2][i]; Bij = nonBondedTerms[3][i]; factorNonBonded = nonBondedTerms[4][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; chargei = atomi.charge; chargej = atomj.charge; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; rij = sqrt( rij2 ); rij6 = rij2 * rij2 * rij2; rij12 = rij6 * rij6; if(useCoulomb) coulombTerm = ( chargei * chargej * coulombFactor*factorNonBonded ) / rij; else coulombTerm = 0.0; energy += Aij / rij12 - Bij / rij6 + coulombTerm; /* printf("A =%f B = %f r= %f e= %f\n", Aij,Bij ,rij,energy); */ } /* printf("Non Bonded energy = %f\n",energy);*/ return energy; } 
/**********************************************************************/ static gdouble calculateEnergyHydrogenBondedAmber(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rij2, rij6, rij12; gdouble rijx, rijy, rijz; gdouble rij4, rij10; gdouble Cij, Dij; Molecule* m = molecule; gdouble* hydrogenBondedTerms[HYDROGENBONDEDDIM]; gint numberOfHydrogenBonded = forceField->numberOfHydrogenBonded; gdouble energy = 0.0; for(i=0;i<HYDROGENBONDEDDIM;i++) hydrogenBondedTerms[i] = forceField->hydrogenBondedTerms[i]; /* Hydrogen-bonded term */ #ifdef ENABLE_OMP #pragma omp parallel for private(i,ai,aj,Cij,Dij,atomi,atomj,rijx,rijy,rijz,rij2,rij4,rij6,rij10,rij12) reduction(+:energy) #endif for ( i = 0; i < numberOfHydrogenBonded; i++ ) { ai = (gint)hydrogenBondedTerms[0][i]; aj = (gint)hydrogenBondedTerms[1][i]; Cij = hydrogenBondedTerms[2][i]; Dij = hydrogenBondedTerms[3][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; if ( rij2 < 1.0e-2 ) { printf("i = %d j = %d\n",ai,aj); rij2 = 1.0e-2; } rij4 = rij2 * rij2; rij6 = rij4 * rij2; rij10 = rij6 * rij4; rij12 = rij10 * rij2; energy += Cij / rij12 - Dij / rij10; /* printf("C =%f D = %f r= %f e= %f\n", Cij,Dij ,sqrt(rij2),energy); */ } return energy; } /**********************************************************************/ static gdouble calculateEnergyPairWise(ForceField* forceField,Molecule* molecule) { gint i; gint ai, aj; AtomMol atomi,atomj; gdouble rij2, rij6, rij8, rij10; gdouble coulombTerm; gdouble rijx, rijy, rijz; gdouble chargei, chargej, rij; gdouble permittivityScale = 1, permittivity = 1; gdouble coulombFactor; Molecule* m = molecule; gdouble* pairWiseTerms[PAIRWISEDIM]; gint numberOfPairWise = forceField->numberOfPairWise; gboolean useCoulomb = 
forceField->options.coulomb; gboolean useVanderWals = forceField->options.vanderWals; gdouble energy = 0.0; gdouble A, Beta; gdouble B6, B8, B10; gdouble c6, c8, c10, b; for(i=0;i<PAIRWISEDIM;i++) pairWiseTerms[i] = forceField->pairWiseTerms[i]; /* now for non-bonded term */ coulombFactor = 332.05382/ ( permittivity * permittivityScale ); /* printf("number of Non Bonded terms = %d\n",numberOfPairWise);*/ for ( i = 0; i < numberOfPairWise; i++ ) { ai = (gint)pairWiseTerms[0][i]; aj = (gint)pairWiseTerms[1][i]; A = pairWiseTerms[2][i]; Beta = pairWiseTerms[3][i]; c6 = pairWiseTerms[4][i]; c8 = pairWiseTerms[5][i]; c10 = pairWiseTerms[6][i]; b = pairWiseTerms[7][i]; atomi = m->atoms[ai]; atomj = m->atoms[aj]; chargei = atomi.charge; chargej = atomj.charge; rijx = atomi.coordinates[0] - atomj.coordinates[0]; rijy = atomi.coordinates[1] - atomj.coordinates[1]; rijz = atomi.coordinates[2] - atomj.coordinates[2]; rij2 = rijx * rijx + rijy * rijy + rijz * rijz; //if(rij2<1e-2) rij = 1e-2; rij = sqrt( rij2 ); rij6 = rij2 * rij2 * rij2; rij8 = rij6* rij2; rij10 = rij8 * rij2; if(useCoulomb) coulombTerm = ( chargei * chargej * coulombFactor ) / rij; else coulombTerm = 0.0; B6 = 0; B8 = 0; B10 = 0; /* printf("A = %f Beta = %f qi = %f qj = %f rij = %f\n",A,Beta,chargei,chargej,rij);*/ if(useVanderWals) { gdouble fact = 1.0; gdouble s = 1.0; gdouble br = b*rij; gdouble brk = 1.0; gint k; if(fabs(c6)>1e-12) { for(k=1;k<=2*3;k++) { fact *= k; brk *= br; s += brk/fact; } B6 = c6*(1-exp(-br)*s); } if(fabs(c8)>1e-12) { fact = 1.0; s = 1.0; br = b*rij; brk = 1.0; for(k=1;k<=2*4;k++) { fact *= k; brk *= br; s += brk/fact; } B8 = c8*(1-exp(-br)*s); } if(fabs(c10)>1e-12) { fact = 1.0; s = 1.0; br = b*rij; brk = 1.0; for(k=1;k<=2*5;k++) { fact *= k; brk *= br; s += brk/fact; } B10 = c10*(1-exp(-br)*s); } } energy += A*exp(-Beta*rij) - B6 / rij6 - B8 / rij8 - B10 / rij10 + coulombTerm; } return energy; } /**********************************************************************/ static void 
calculateEnergyAmber(ForceField* forceField) { Molecule* m = &forceField->molecule; forceField->molecule.energy = calculateEnergyTmpAmber(forceField,m); } /**********************************************************************/ static gdouble calculateEnergyTmpAmber(ForceField* forceField,Molecule* molecule) { gdouble energy = 0.0; energy +=calculateEnergyBondAmber(forceField,molecule); energy +=calculateEnergyBendAmber(forceField,molecule); energy +=calculateEnergyDihedralAmber(forceField,molecule); energy +=calculateEnergyImproperTorsionAmber(forceField,molecule); energy +=calculateEnergyfNonBondedAmber(forceField,molecule); energy +=calculateEnergyHydrogenBondedAmber(forceField,molecule); energy +=calculateEnergyPairWise(forceField,molecule); return energy; } /**********************************************************************/ ForceField createAmberModel (GeomDef* geom, gint Natoms,ForceFieldOptions forceFieldOptions) { ForceField forceField = newAmberModel(); forceField.molecule = createMolecule(geom,Natoms,TRUE); forceField.options = forceFieldOptions; set_text_to_draw(_("Setting of Parameters ...")); set_statubar_operation_str(_("Setting of Parameters ...")); drawGeom(); while( gtk_events_pending() ) gtk_main_iteration(); setAmberParameters(&forceField); drawGeom(); while( gtk_events_pending() ) gtk_main_iteration(); return forceField; } /**********************************************************************/ ForceField createPairWiseModel (GeomDef* geom,gint Natoms,ForceFieldOptions forceFieldOptions) { ForceField forceField = newPairWiseModel(); forceField.molecule = createMolecule(geom,Natoms,TRUE); forceField.options = forceFieldOptions; forceField.options.bondStretch = FALSE; forceField.options.angleBend = FALSE; forceField.options.dihedralAngle = FALSE; forceField.options.improperTorsion = FALSE; forceField.options.nonBonded = FALSE; forceField.options.hydrogenBonded = FALSE; set_text_to_draw(_("Setting of Parameters ...")); 
set_statubar_operation_str(_("Setting of Parameters ...")); drawGeom(); while( gtk_events_pending() ) gtk_main_iteration(); setAllPairWiseParameters(&forceField); drawGeom(); while( gtk_events_pending() ) gtk_main_iteration(); return forceField; } /**********************************************************************/ void loadAmberParameters() { AmberParameters amberParameters; gchar* persoFileName = g_strdup_printf("%s%sPersonalMM.prm",gabedit_directory(), G_DIR_SEPARATOR_S); gchar* defaultFileName = g_strdup_printf("%s%sMolecularMechanics.prm",gabedit_directory(), G_DIR_SEPARATOR_S); if(staticAmberParameters != NULL) freeAmberParameters(staticAmberParameters); amberParameters = newAmberParameters(); if(!readAmberParameters(&amberParameters,persoFileName)) if(!readAmberParameters(&amberParameters,defaultFileName)) { createMMFile(); if(!readAmberParameters(&amberParameters,defaultFileName)) { g_free(persoFileName); g_free(defaultFileName); return; } } staticAmberParameters = g_malloc(sizeof(AmberParameters)); *staticAmberParameters = amberParameters; g_free(persoFileName); g_free(defaultFileName); } /**********************************************************************/ void saveAmberParameters() { createPersonalParametersFile(staticAmberParameters); } /**********************************************************************/ AmberParameters* getPointerAmberParameters() { return staticAmberParameters; } /**********************************************************************/ void setPointerAmberParameters(AmberParameters* ptr) { staticAmberParameters = ptr; } /********************************************************************************/ gchar** getListMMTypes(gint* nlist) { gchar** t = NULL; gint i; *nlist = 0; if(!staticAmberParameters || staticAmberParameters->numberOfTypes<=0) return NULL; t = g_malloc(staticAmberParameters->numberOfTypes*sizeof(gchar*)); *nlist = staticAmberParameters->numberOfTypes; for(i=0;i<staticAmberParameters->numberOfTypes;i++) t[i] 
= g_strdup(staticAmberParameters->atomTypes[i].name); return t; }
// ---- pooling_3x3_pack4_bf16s.h ----
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void pooling3x3s2_max_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); unsigned short* outptr = top_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmax v16.4s, v0.4s, v1.4s \n" "fmax v17.4s, v2.4s, v3.4s \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmax v18.4s, v4.4s, v5.4s \n" "fmax v19.4s, v6.4s, v7.4s \n" "ld1 {v8.4h}, [%1] \n" "shll v8.4s, v8.4h, #16 \n" "fmax v20.4s, 
v16.4s, v2.4s \n" "fmax v21.4s, v17.4s, v4.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmax v22.4s, v18.4s, v6.4s \n" "fmax v23.4s, v19.4s, v8.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmax v16.4s, v0.4s, v1.4s \n" "fmax v17.4s, v2.4s, v3.4s \n" "fmax v18.4s, v4.4s, v5.4s \n" "fmax v19.4s, v6.4s, v7.4s \n" "ld1 {v8.4h}, [%2] \n" "shll v8.4s, v8.4h, #16 \n" "fmax v24.4s, v16.4s, v2.4s \n" "fmax v25.4s, v17.4s, v4.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmax v26.4s, v18.4s, v6.4s \n" "fmax v27.4s, v19.4s, v8.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmax v16.4s, v0.4s, v1.4s \n" "fmax v17.4s, v2.4s, v3.4s \n" "fmax v18.4s, v4.4s, v5.4s \n" "fmax v19.4s, v6.4s, v7.4s \n" "ld1 {v8.4h}, [%3] \n" "shll v8.4s, v8.4h, #16 \n" "fmax v28.4s, v16.4s, v2.4s \n" "fmax v29.4s, v17.4s, v4.4s \n" "fmax v30.4s, v18.4s, v6.4s \n" "fmax v31.4s, v19.4s, v8.4s \n" "fmax v20.4s, v20.4s, v24.4s \n" "fmax v21.4s, v21.4s, v25.4s \n" "fmax v22.4s, v22.4s, v26.4s \n" "fmax v23.4s, v23.4s, v27.4s \n" "fmax v20.4s, v20.4s, v28.4s \n" "fmax v21.4s, v21.4s, v29.4s \n" "fmax v22.4s, v22.4s, v30.4s \n" "fmax v23.4s, v23.4s, v31.4s \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : 
"0"(outptr), "1"(r0), "2"(r1), "3"(r2) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1]! \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmax.f32 q0, q0, q4 \n" "vmax.f32 q1, q1, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d20-d23}, [%3]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmax.f32 q2, q2, q6 \n" "vmax.f32 q3, q3, q7 \n" "vmax.f32 q0, q0, q8 \n" "vmax.f32 q1, q1, q9 \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmax.f32 q2, q2, q10 \n" "vmax.f32 q3, q3, q11 \n" "pld [%2, #256] \n" "vld1.u16 {d20-d23}, [%2]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmax.f32 q4, q4, q8 \n" "vmax.f32 q5, q5, q9 \n" "pld [%3, #256] \n" "vld1.u16 {d28-d31}, [%3]! 
\n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmax.f32 q6, q6, q10 \n" "vmax.f32 q7, q7, q11 \n" "vmax.f32 q4, q4, q12 \n" "vmax.f32 q5, q5, q13 \n" "vld1.u16 {d25}, [%1] \n" "vld1.u16 {d27}, [%2] \n" "vshll.u16 q12, d25, #16 \n" "vshll.u16 q13, d27, #16 \n" "vmax.f32 q6, q6, q14 \n" "vmax.f32 q7, q7, q15 \n" "vld1.u16 {d29}, [%3] \n" "vshll.u16 q14, d29, #16 \n" "vmax.f32 q8, q12, q13 \n" "vmax.f32 q8, q8, q14 \n" "vmax.f32 q12, q0, q1 \n" "vmax.f32 q13, q2, q3 \n" "vmax.f32 q14, q4, q5 \n" "vmax.f32 q15, q6, q7 \n" "vmax.f32 q12, q12, q2 \n" "vmax.f32 q13, q13, q4 \n" "vmax.f32 q14, q14, q6 \n" "vmax.f32 q15, q15, q8 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0]! \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmax v16.4s, v0.4s, v4.4s \n" "fmax v17.4s, v1.4s, v5.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmax v18.4s, v2.4s, v6.4s \n" "fmax v19.4s, v3.4s, v7.4s \n" "ld1 {v0.4s}, [%1] \n" "fmax v16.4s, v16.4s, v20.4s \n" "fmax v17.4s, v17.4s, v21.4s \n" "ld1 {v1.4s}, [%2] \n" "fmax 
v18.4s, v18.4s, v22.4s \n" "fmax v19.4s, v19.4s, v23.4s \n" "ld1 {v2.4s}, [%3] \n" "fmax v3.4s, v0.4s, v1.4s \n" "fmax v20.4s, v16.4s, v17.4s \n" "fmax v21.4s, v18.4s, v19.4s \n" "fmax v3.4s, v3.4s, v2.4s \n" "fmax v20.4s, v20.4s, v18.4s \n" "fmax v21.4s, v21.4s, v3.4s \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "st1 {v20.4h, v21.4h}, [%0], #16 \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.u16 {d4-d7}, [%1]! \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmax.f32 q12, q0, q4 \n" "vmax.f32 q13, q1, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d20-d23}, [%3]! \n" "vshll.u16 q8, d20, #16 \n" "vshll.u16 q9, d21, #16 \n" "vshll.u16 q10, d22, #16 \n" "vshll.u16 q11, d23, #16 \n" "vmax.f32 q14, q2, q6 \n" "vmax.f32 q15, q3, q7 \n" "vld1.u16 {d1}, [%1] \n" "vshll.u16 q0, d1, #16 \n" "vmax.f32 q12, q12, q8 \n" "vmax.f32 q13, q13, q9 \n" "vld1.u16 {d3}, [%2] \n" "vshll.u16 q1, d3, #16 \n" "vmax.f32 q14, q14, q10 \n" "vmax.f32 q15, q15, q11 \n" "vld1.u16 {d5}, [%3] \n" "vshll.u16 q2, d5, #16 \n" "vmax.f32 q3, q0, q1 \n" "vmax.f32 q4, q12, q13 \n" "vmax.f32 q5, q14, q15 \n" "vmax.f32 q3, q3, q2 \n" "vmax.f32 q4, q4, q14 \n" "vmax.f32 q5, q5, q3 \n" "vshrn.u32 d8, q4, #16 \n" "vshrn.u32 d9, q5, #16 \n" "vst1.u16 {d8-d9}, [%0]! 
\n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4)); float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8)); float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4)); float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8)); float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2)); float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4)); float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8)); float32x4_t _max0 = vmaxq_f32(vmaxq_f32(_r00, _r01), _r02); float32x4_t _max1 = vmaxq_f32(vmaxq_f32(_r10, _r11), _r12); float32x4_t _max2 = vmaxq_f32(vmaxq_f32(_r20, _r21), _r22); float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2); vst1_u16(outptr, vcvt_bf16_f32(_max)); r0 += 8; r1 += 8; r2 += 8; outptr += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
net_ah_fmt_plug.c
/* Cracker for IPsec Authentication Header (AH) hashes. * * This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_netah; #elif FMT_REGISTERS_H john_register_one(&fmt_netah); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include "formats.h" #include "hmacmd5.h" #include "misc.h" #include "common.h" #include "params.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "net-ah" #define FORMAT_NAME "IPsec AH HMAC-MD5-96" #define FORMAT_TAG "$net-ah$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 8 // Keepalived limit is 8 #define BINARY_SIZE 12 #define BINARY_SIZE_ALLOC 16 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_SALT_LEN 1500 static struct fmt_tests tests[] = { {"$net-ah$0$4500004000150000ff330000c0a87c01e000001270040000c0a87c01000000150000000000000000000000002133650102016c3e0a00018c0000000000000000$ad719a912d50a53935d9ad41", "monkey"}, {"$net-ah$0$4500004000190000ff330000c0a87c01e000001270040000c0a87c01000000190000000000000000000000002133650102016dc00a00000a0000000000000000$d790123ffdd3ddb2fe1d7205", "openwall"}, {"$net-ah$0$4500004000170000ff330000c0a87c01e000001270040000c0a87c01000000170000000000000000000000002133650102016dc00a00000a0000000000000000$bb615df255867845496392d8", "12345678"}, {"$net-ah$0$45000040001e0000ff330000c0a87c01e000001270040000c0a87c010000001e0000000000000000000000002133650102016dc00a00000a0000000000000000$7c6ba14741b4597750ffe6a1", "Müller"}, {NULL} 
}; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE_ALLOC / sizeof(uint32_t)]; static struct custom_salt { uint32_t length; unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_num_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(crypt_out); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) // version / type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 0) goto err; if ((p = strtokm(NULL, "$")) == NULL) // salt goto err; if (hexlenl(p, &extra) > MAX_SALT_LEN * 2 || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) // binary goto err; if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "$"); // version / type p = strtokm(NULL, "$"); // salt cs.length = strlen(p) / 2; for (i = 0; i < cs.length; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; MEM_FREE(keeptr); return &cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < 
BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { HMACMD5Context ctx; hmac_md5_init_rfc2104((const unsigned char*)saved_key[index], strlen(saved_key[index]), &ctx); hmac_md5_update(cur_salt->salt, cur_salt->length, &ctx); hmac_md5_final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void netah_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_netah = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, netah_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
opencl_keychain_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_keychain); #else #include <stdint.h> #include <string.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "options.h" #include "jumbo.h" #include "common-opencl.h" #define FORMAT_LABEL "keychain-opencl" #define FORMAT_NAME "Mac OS X Keychain" #define FORMAT_TAG "$keychain$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(*salt_struct) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 #define SALTLEN 20 #define IVLEN 8 #define CTLEN 48 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } keychain_password; typedef struct { uint32_t v[32/4]; } keychain_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } keychain_salt; static int *cracked; static int any_cracked; static struct fmt_main *self; static struct fmt_tests keychain_tests[] = { {"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"}, // these were generated with pass_gen.pl. 
NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash. {"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"}, {"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"}, {"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"}, {"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"}, {NULL} }; static struct custom_salt { unsigned char salt[SALTLEN]; unsigned char iv[IVLEN]; unsigned char ct[CTLEN]; } *salt_struct; static cl_int cl_error; static keychain_password *inbuffer; static keychain_hash *outbuffer; static keychain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

/* Phase labels used by the autotune library's timing report (one per
 * profiling event recorded in crypt_all). */
static const char * warn[] = {
	"xfer: ",  ", crypt: ",  ", xfer: "
};

/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/* Allocate host buffers and GPU buffers for `gws` keys and bind the
 * kernel arguments.  Called by the autotune library (see reset()). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
		NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
		clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in),
		"Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out),
		"Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}

/* Free everything create_clobj() allocated; `cracked` doubles as the
 * "buffers exist" flag. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Tear down buffers, kernel and program once autotuning has happened. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the PBKDF2-HMAC-SHA1 kernel and run the autotuner (first call only). */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keychain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/* Validate "$keychain$*<salt>*<iv>*<ct>" — three fixed-length hex fields
 * (SALTLEN, IVLEN, CTLEN bytes respectively). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */
		goto err;
	if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)   /* iv */
		goto err;
	if (hexlenl(p, &extra) != IVLEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)   /* ciphertext */
		goto err;
	if (hexlenl(p, &extra) != CTLEN * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse salt, IV and ciphertext hex fields into one custom_salt record.
 * NOTE: the function-local static `salt_struct` deliberately shadows the
 * file-scope pointer of the same name; the record is allocated once and
 * reused for every call. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt *salt_struct;

	if (!salt_struct)
		salt_struct = mem_calloc_tiny(sizeof(struct custom_salt),
		                              MEM_ALIGN_WORD);
	ctcopy += FORMAT_TAG_LEN; /* skip over "$keychain$*" */
	p = strtokm(ctcopy, "*");
	for (i = 0; i < SALTLEN; i++)
		salt_struct->salt[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < IVLEN; i++)
		salt_struct->iv[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < CTLEN; i++)
		salt_struct->ct[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);

	return (void *)salt_struct;
}

/* Install the current salt and push the fixed PBKDF2 parameters
 * (20-byte salt, 1000 iterations, 24-byte output) to the GPU. */
static void set_salt(void *salt)
{
	salt_struct = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, salt_struct->salt, 20);
	currentsalt.length = 20;
	currentsalt.iterations = 1000;
	currentsalt.outlen = 24;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}

#undef set_key
/* Store one candidate password (truncated to PLAINTEXT_LENGTH) in the
 * host-side input buffer; it is uploaded in crypt_all(). */
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/* 3DES-CBC-decrypt the 48-byte keychain blob with the derived 24-byte key
 * and check the PKCS padding.  Returns 0 on a plausible decrypt, -1 on
 * mismatch. */
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
	unsigned char out[CTLEN];
	DES_cblock key1, key2, key3;
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;

	memset(out, 0, sizeof(out));
	memcpy(key1, key, 8);
	memcpy(key2, key + 8, 8);
	memcpy(key3, key + 16, 8);
	DES_set_key((DES_cblock *) key1, &ks1);
	DES_set_key((DES_cblock *) key2, &ks2);
	DES_set_key((DES_cblock *) key3, &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec,
	                     DES_DECRYPT);

	/* possible bug here, is this assumption (pad of 4) always valid? */
	if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0)
		return -1;

	return 0;
}

#if 0 //#ifdef DEBUG
static void print_hex(unsigned char *str, int len)
{
	int i;

	for (i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif

/* Derive keys on the GPU, then test each derived key on the CPU (OpenMP)
 * by attempting to decrypt the keychain blob. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
		"Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!kcdecrypt((unsigned char*)outbuffer[index].v,
		               salt_struct->iv, salt_struct->ct)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_keychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		keychain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_binop__lor_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):     GB (_AaddB__lor_uint64)
// A.*B function (eWiseMult):   GB (_AemultB_08__lor_uint64)
// A.*B function (eWiseMult):   GB (_AemultB_02__lor_uint64)
// A.*B function (eWiseMult):   GB (_AemultB_04__lor_uint64)
// A.*B function (eWiseMult):   GB (_AemultB_bitmap__lor_uint64)
// A*D function (colscale):     GB (_AxD__lor_uint64)
// D*A function (rowscale):     GB (_DxB__lor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint64)
// C+=A+B function (dense ewise3):    GB ((none))
// C=A+B function (dense ewise3):     GB (_Cdense_ewise3_noaccum__lor_uint64)
// C=scalar+B     GB (_bind1st__lor_uint64)
// C=scalar+B'    GB (_bind1st_tran__lor_uint64)
// C=A+scalar     GB (_bind2nd__lor_uint64)
// C=A'+scalar    GB (_bind2nd_tran__lor_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern?  0
// B type:   uint64_t
// B pattern?  0

// BinaryOp: cij = ((aij != 0) || (bij != 0))

// The macros below are consumed by the #include'd *_template.c files; each
// one specializes the generic kernels to the LOR operator on uint64_t.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) || (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LOR does not qualify, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above already returned); kept as
    // emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) || (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__lor_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) || (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__lor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <float.h> #include <math.h> #ifdef _OPENMP # include <omp.h> #endif #include "main.h" #define min(a, b) ((a<b)?a:b) #define max(a, b) ((a>b)?a:b) void parse(int argc, char* argv[], struct user_parameters* params) { int i; for(i=1; i<argc; i++) { if(!strcmp(argv[i], "-c")) params->check = 1; else if(!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { printf("----------------------------------------------\n"); printf("- KaStORS -\n"); printf("- Kaapi Starpu OpenMP Runtime task Suite -\n"); printf("----------------------------------------------\n"); printf("-h, --help : Show help information\n"); printf("-c : Ask to check result\n"); printf("-i : Number of iterations\n"); printf("-n : Matrix size\n"); printf("-s : Cutoff (Size of the matrix)\n"); printf("-d : Cutoff (depth)\n"); printf("-t : Choose algorithm (leaving blank will run type task)\n(Options for type) 1 - task, 2 - task with depend\n"); exit(EXIT_SUCCESS); } else if(!strcmp(argv[i], "-i")) { if (++i < argc) params->niter = atoi(argv[i]); else { fprintf(stderr, "-i requires a number\n"); exit(EXIT_FAILURE); } } else if(!strcmp(argv[i], "-n")) { if (++i < argc) params->matrix_size = atoi(argv[i]); else { fprintf(stderr, "-n requires a number\n"); exit(EXIT_FAILURE); } } else if(!strcmp(argv[i], "-s")) { if (++i < argc) params->cutoff_size = atoi(argv[i]); else { fprintf(stderr, "-s requires a number\n"); exit(EXIT_FAILURE); } } else if(!strcmp(argv[i], "-d")) { if (++i < argc) params->cutoff_depth = atoi(argv[i]); else { fprintf(stderr, "-d requires a number\n"); exit(EXIT_FAILURE); } } else if(!strcmp(argv[i], "-t")) { if (++i < argc) params->type = atoi(argv[i]); else { fprintf(stderr, "-t requires a number\n"); exit(EXIT_FAILURE); } } else fprintf(stderr, "Unknown parameter : %s\n", argv[i]); } } int comp (const void * elem1, const void * elem2) { double f = *((double*)elem1); double s = *((double*)elem2); if (f > s) return 1; if (f < 
s) return -1; return 0; } int main(int argc, char* argv[]) { int num_threads = 1; struct user_parameters params; memset(&params, 0, sizeof(params)); /* default value */ params.niter = 1; parse(argc, argv, &params); // get Number of thread if OpenMP is activated #ifdef _OPENMP #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); #endif // warmup run(&params); double mean = 0.0; double meansqr = 0.0; double min_ = DBL_MAX; double max_ = -1; double* all_times = (double*)malloc(sizeof(double) * params.niter); for (int i=0; i<params.niter; ++i) { double cur_time = run(&params); all_times[i] = cur_time; mean += cur_time; min_ = min(min_, cur_time); max_ = max(max_, cur_time); meansqr += cur_time * cur_time; } mean /= params.niter; meansqr /= params.niter; double stddev = sqrt(meansqr - mean * mean); qsort(all_times, params.niter, sizeof(double), comp); double median = all_times[params.niter / 2]; free(all_times); printf("Program : %s\n", argv[0]); printf("Size : %d\n", params.matrix_size); printf("Iterations : %d\n", params.niter); printf("Cutoff Size : %d\n", params.cutoff_size); printf("Cutoff depth : %d\n", params.cutoff_depth); printf("Threads : %d\n", num_threads); #ifdef GFLOPS printf("Gflops:: "); #else printf("Time(sec):: "); #endif printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n", mean, stddev, min_, max_, median); if(params.check) printf("Check : %s\n", (params.succeed)? 
((params.succeed > 1)?"not implemented":"success") :"fail"); if (params.string2display !=0) printf("%s", params.string2display); printf("\n"); /* Rodar aqui o codigo sequencial run_seq*/ printf("Running Sequential code\n"); struct user_parameters params_seq; memset(&params_seq, 0, sizeof(params_seq)); /* default value */ params_seq.niter = params.niter; params_seq.matrix_size = params.matrix_size; params_seq.cutoff_size = params.cutoff_size; params_seq.cutoff_depth = params.cutoff_depth; params_seq.check = params.check; params_seq.type = 3; //strcpy(params_seq.string2display, params.string2display); params_seq.string2display = params.string2display; //parse(argc, argv, &params_seq); // warmup run(&params_seq); double mean_seq = 0.0; double meansqr_seq = 0.0; double min_seq = DBL_MAX; double max_seq = -1; double* all_times_seq = (double*)malloc(sizeof(double) * params_seq.niter); for (int i=0; i<params_seq.niter; ++i) { double cur_time = run(&params_seq); all_times_seq[i] = cur_time; mean_seq += cur_time; min_seq = min(min_, cur_time); max_seq = max(max_, cur_time); meansqr_seq += cur_time * cur_time; } mean_seq /= params_seq.niter; meansqr_seq /= params_seq.niter; double stddev_seq = sqrt(meansqr_seq - mean_seq * mean_seq); qsort(all_times_seq, params_seq.niter, sizeof(double), comp); double median_seq = all_times_seq[params_seq.niter / 2]; free(all_times_seq); printf("Sequential Stats\n"); printf("Program : %s\n", argv[0]); printf("Size : %d\n", params_seq.matrix_size); printf("Iterations : %d\n", params_seq.niter); printf("Cutoff Size : %d\n", params_seq.cutoff_size); printf("Cutoff depth : %d\n", params_seq.cutoff_depth); #ifdef GFLOPS printf("Gflops:: "); #else printf("Time(sec):: "); #endif printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n", mean_seq, stddev_seq, min_seq, max_seq, median_seq); if(params_seq.check) printf("Check : %s\n", (params_seq.succeed)? 
((params_seq.succeed > 1)?"not implemented":"success") :"fail"); if (params_seq.string2display !=0) printf("%s", params_seq.string2display); printf("\n"); return 0; }
GB_binop__lor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_int16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_03__lor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int16) // A*D function (colscale): GB (_AxD__lor_int16) // D*A function (rowscale): GB (_DxB__lor_int16) // C+=B function (dense accum): GB (_Cdense_accumB__lor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__lor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int16) // C=scalar+B GB (_bind1st__lor_int16) // C=scalar+B' GB (_bind1st_tran__lor_int16) // C=A+scalar GB (_bind2nd__lor_int16) // C=A'+scalar GB (_bind2nd_tran__lor_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij 
= Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT16 || GxB_NO_LOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): this file is auto-generated (see the file header); the
// comments below were added for review readability only -- regenerate the
// file rather than hand-editing the code.

// Dense eWise3 without accumulator: C = A+B with the LOR_INT16 operator,
// dispatching to the shared template.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Parallelized over B via the B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the return inside the braces above always
    // fires first; a harmless code-generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
single_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s void foo(); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single foo void test_no_clause() { int i; #pragma omp single foo(); #pragma omp single ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp single { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single foo bar foo(); } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single; foo(); #pragma omp parallel // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single linear(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single private(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single, private(x); foo(); } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp single private( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 
{{expected expression}} #pragma omp single private(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single private(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single private(0) foo(); int x, y, z; #pragma omp parallel #pragma omp single private(x) foo(); #pragma omp parallel #pragma omp single private(x, y) foo(); #pragma omp parallel #pragma omp single private(x, y, z) foo(); } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp single firstprivate( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single firstprivate(0) foo(); } void test_nowait() { #pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}} for (int i = 0; i < 16; ++i) ; }
mm.best-par.c
#include <stdio.h> #include <sys/time.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define M NCONT #define N NCONT #define K CONT double A[M][K]; double B[K][N]; double C[M][N]; void init_arrays() { int i1, i2; for (i1=0; i1<M; i1++) for (i2=0; i2<K; i2++) A[i1][i2] = (i1+i2) % 5 + 1; for (i1=0; i1<K; i1++) for (i2=0; i2<N; i2++) B[i1][i2] = (i1+i2) % 5 + 1; for (i1=0; i1<M; i1++) for (i2=0; i2<N; i2++) C[i1][i2] = 0; } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); int i, j, k; int ii, jj, kk; int iii, jjj, kkk; { double C_copy[128][64]; double B_copy; double A_copy[256][128]; register int cbv_1; cbv_1=K-1; #pragma omp parallel for private(iii,jjj,kkk,ii,jj,kk,i,j,k,A_copy,B_copy,C_copy) for (kkk=0; kkk<=cbv_1; kkk=kkk+512) { for (iii=0; iii<=M-1; iii=iii+256) { for (jjj=0; jjj<=N-1; jjj=jjj+1024) { for (kk=kkk; kk<=min(K-1,kkk+256); kk=kk+256) { for (ii=iii; ii<=min(M-1,iii+128); ii=ii+128) { for (k=kk; k<=min(K-1,kk+255); k=k+1) for (i=ii; i<=min(M-1,ii+127); i=i+1) A_copy[(k-kk)][(i-ii)]=A[i][k]; for (jj=jjj; jj<=min(N-1,jjj+960); jj=jj+64) { for (i=ii; i<=min(M-1,ii+127); i=i+1) for (j=jj; j<=min(N-1,jj+63); j=j+1) C_copy[(i-ii)][(j-jj)]=C[i][j]; for (k=kk; k<=min(K-1,kk+255)-7; k=k+8) { for (i=ii; i<=min(M-1,ii+127)-7; i=i+8) { register int cbv_2; cbv_2=min(N-1,jj+63); #pragma ivdep #pragma vector always for (j=jj; j<=cbv_2; j=j+1) { double scv_1, scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8; double scv_9, scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16; scv_1=B[k][j]; scv_2=B[(k+6)][j]; scv_3=B[(k+5)][j]; 
scv_4=C_copy[(i-ii+4)][(j-jj)]; scv_5=C_copy[(i-ii+2)][(j-jj)]; scv_6=B[(k+4)][j]; scv_7=C_copy[(i-ii+3)][(j-jj)]; scv_8=C_copy[(i-ii+6)][(j-jj)]; scv_9=B[(k+3)][j]; scv_10=C_copy[(i-ii+5)][(j-jj)]; scv_11=C_copy[(i-ii+1)][(j-jj)]; scv_12=B[(k+1)][j]; scv_13=C_copy[(i-ii+7)][(j-jj)]; scv_14=B[(k+2)][j]; scv_15=B[(k+7)][j]; scv_16=C_copy[(i-ii)][(j-jj)]; scv_16=scv_16+A_copy[(k-kk)][(i-ii)]*scv_1; scv_11=scv_11+A_copy[(k-kk)][(i-ii+1)]*scv_1; scv_5=scv_5+A_copy[(k-kk)][(i-ii+2)]*scv_1; scv_7=scv_7+A_copy[(k-kk)][(i-ii+3)]*scv_1; scv_4=scv_4+A_copy[(k-kk)][(i-ii+4)]*scv_1; scv_10=scv_10+A_copy[(k-kk)][(i-ii+5)]*scv_1; scv_8=scv_8+A_copy[(k-kk)][(i-ii+6)]*scv_1; scv_13=scv_13+A_copy[(k-kk)][(i-ii+7)]*scv_1; scv_16=scv_16+A_copy[(k-kk+1)][(i-ii)]*scv_12; scv_11=scv_11+A_copy[(k-kk+1)][(i-ii+1)]*scv_12; scv_5=scv_5+A_copy[(k-kk+1)][(i-ii+2)]*scv_12; scv_7=scv_7+A_copy[(k-kk+1)][(i-ii+3)]*scv_12; scv_4=scv_4+A_copy[(k-kk+1)][(i-ii+4)]*scv_12; scv_10=scv_10+A_copy[(k-kk+1)][(i-ii+5)]*scv_12; scv_8=scv_8+A_copy[(k-kk+1)][(i-ii+6)]*scv_12; scv_13=scv_13+A_copy[(k-kk+1)][(i-ii+7)]*scv_12; scv_16=scv_16+A_copy[(k-kk+2)][(i-ii)]*scv_14; scv_11=scv_11+A_copy[(k-kk+2)][(i-ii+1)]*scv_14; scv_5=scv_5+A_copy[(k-kk+2)][(i-ii+2)]*scv_14; scv_7=scv_7+A_copy[(k-kk+2)][(i-ii+3)]*scv_14; scv_4=scv_4+A_copy[(k-kk+2)][(i-ii+4)]*scv_14; scv_10=scv_10+A_copy[(k-kk+2)][(i-ii+5)]*scv_14; scv_8=scv_8+A_copy[(k-kk+2)][(i-ii+6)]*scv_14; scv_13=scv_13+A_copy[(k-kk+2)][(i-ii+7)]*scv_14; scv_16=scv_16+A_copy[(k-kk+3)][(i-ii)]*scv_9; scv_11=scv_11+A_copy[(k-kk+3)][(i-ii+1)]*scv_9; scv_5=scv_5+A_copy[(k-kk+3)][(i-ii+2)]*scv_9; scv_7=scv_7+A_copy[(k-kk+3)][(i-ii+3)]*scv_9; scv_4=scv_4+A_copy[(k-kk+3)][(i-ii+4)]*scv_9; scv_10=scv_10+A_copy[(k-kk+3)][(i-ii+5)]*scv_9; scv_8=scv_8+A_copy[(k-kk+3)][(i-ii+6)]*scv_9; scv_13=scv_13+A_copy[(k-kk+3)][(i-ii+7)]*scv_9; scv_16=scv_16+A_copy[(k-kk+4)][(i-ii)]*scv_6; scv_11=scv_11+A_copy[(k-kk+4)][(i-ii+1)]*scv_6; scv_5=scv_5+A_copy[(k-kk+4)][(i-ii+2)]*scv_6; 
scv_7=scv_7+A_copy[(k-kk+4)][(i-ii+3)]*scv_6; scv_4=scv_4+A_copy[(k-kk+4)][(i-ii+4)]*scv_6; scv_10=scv_10+A_copy[(k-kk+4)][(i-ii+5)]*scv_6; scv_8=scv_8+A_copy[(k-kk+4)][(i-ii+6)]*scv_6; scv_13=scv_13+A_copy[(k-kk+4)][(i-ii+7)]*scv_6; scv_16=scv_16+A_copy[(k-kk+5)][(i-ii)]*scv_3; scv_11=scv_11+A_copy[(k-kk+5)][(i-ii+1)]*scv_3; scv_5=scv_5+A_copy[(k-kk+5)][(i-ii+2)]*scv_3; scv_7=scv_7+A_copy[(k-kk+5)][(i-ii+3)]*scv_3; scv_4=scv_4+A_copy[(k-kk+5)][(i-ii+4)]*scv_3; scv_10=scv_10+A_copy[(k-kk+5)][(i-ii+5)]*scv_3; scv_8=scv_8+A_copy[(k-kk+5)][(i-ii+6)]*scv_3; scv_13=scv_13+A_copy[(k-kk+5)][(i-ii+7)]*scv_3; scv_16=scv_16+A_copy[(k-kk+6)][(i-ii)]*scv_2; scv_11=scv_11+A_copy[(k-kk+6)][(i-ii+1)]*scv_2; scv_5=scv_5+A_copy[(k-kk+6)][(i-ii+2)]*scv_2; scv_7=scv_7+A_copy[(k-kk+6)][(i-ii+3)]*scv_2; scv_4=scv_4+A_copy[(k-kk+6)][(i-ii+4)]*scv_2; scv_10=scv_10+A_copy[(k-kk+6)][(i-ii+5)]*scv_2; scv_8=scv_8+A_copy[(k-kk+6)][(i-ii+6)]*scv_2; scv_13=scv_13+A_copy[(k-kk+6)][(i-ii+7)]*scv_2; scv_16=scv_16+A_copy[(k-kk+7)][(i-ii)]*scv_15; scv_11=scv_11+A_copy[(k-kk+7)][(i-ii+1)]*scv_15; scv_5=scv_5+A_copy[(k-kk+7)][(i-ii+2)]*scv_15; scv_7=scv_7+A_copy[(k-kk+7)][(i-ii+3)]*scv_15; scv_4=scv_4+A_copy[(k-kk+7)][(i-ii+4)]*scv_15; scv_10=scv_10+A_copy[(k-kk+7)][(i-ii+5)]*scv_15; scv_8=scv_8+A_copy[(k-kk+7)][(i-ii+6)]*scv_15; scv_13=scv_13+A_copy[(k-kk+7)][(i-ii+7)]*scv_15; C_copy[(i-ii+4)][(j-jj)]=scv_4; C_copy[(i-ii+2)][(j-jj)]=scv_5; C_copy[(i-ii+3)][(j-jj)]=scv_7; C_copy[(i-ii+6)][(j-jj)]=scv_8; C_copy[(i-ii+5)][(j-jj)]=scv_10; C_copy[(i-ii+1)][(j-jj)]=scv_11; C_copy[(i-ii+7)][(j-jj)]=scv_13; C_copy[(i-ii)][(j-jj)]=scv_16; } } for (; i<=min(M-1,ii+127); i=i+1) { register int cbv_3; cbv_3=min(N-1,jj+63); #pragma ivdep #pragma vector always for (j=jj; j<=cbv_3; j=j+1) { double scv_17; scv_17=C_copy[(i-ii)][(j-jj)]; scv_17=scv_17+A_copy[(k-kk)][(i-ii)]*B[k][j]; scv_17=scv_17+A_copy[(k-kk+1)][(i-ii)]*B[(k+1)][j]; scv_17=scv_17+A_copy[(k-kk+2)][(i-ii)]*B[(k+2)][j]; 
scv_17=scv_17+A_copy[(k-kk+3)][(i-ii)]*B[(k+3)][j]; scv_17=scv_17+A_copy[(k-kk+4)][(i-ii)]*B[(k+4)][j]; scv_17=scv_17+A_copy[(k-kk+5)][(i-ii)]*B[(k+5)][j]; scv_17=scv_17+A_copy[(k-kk+6)][(i-ii)]*B[(k+6)][j]; scv_17=scv_17+A_copy[(k-kk+7)][(i-ii)]*B[(k+7)][j]; C_copy[(i-ii)][(j-jj)]=scv_17; } } } for (; k<=min(K-1,kk+255); k=k+1) { for (i=ii; i<=min(M-1,ii+127)-7; i=i+8) { register int cbv_4; cbv_4=min(N-1,jj+63); #pragma ivdep #pragma vector always for (j=jj; j<=cbv_4; j=j+1) { double scv_18, scv_19, scv_20, scv_21, scv_22, scv_23, scv_24, scv_25; double scv_26; scv_18=C_copy[(i-ii+7)][(j-jj)]; scv_19=B[k][j]; scv_20=C_copy[(i-ii+5)][(j-jj)]; scv_21=C_copy[(i-ii+3)][(j-jj)]; scv_22=C_copy[(i-ii+4)][(j-jj)]; scv_23=C_copy[(i-ii+6)][(j-jj)]; scv_24=C_copy[(i-ii+1)][(j-jj)]; scv_25=C_copy[(i-ii)][(j-jj)]; scv_26=C_copy[(i-ii+2)][(j-jj)]; scv_25=scv_25+A_copy[(k-kk)][(i-ii)]*scv_19; scv_24=scv_24+A_copy[(k-kk)][(i-ii+1)]*scv_19; scv_26=scv_26+A_copy[(k-kk)][(i-ii+2)]*scv_19; scv_21=scv_21+A_copy[(k-kk)][(i-ii+3)]*scv_19; scv_22=scv_22+A_copy[(k-kk)][(i-ii+4)]*scv_19; scv_20=scv_20+A_copy[(k-kk)][(i-ii+5)]*scv_19; scv_23=scv_23+A_copy[(k-kk)][(i-ii+6)]*scv_19; scv_18=scv_18+A_copy[(k-kk)][(i-ii+7)]*scv_19; C_copy[(i-ii+7)][(j-jj)]=scv_18; C_copy[(i-ii+5)][(j-jj)]=scv_20; C_copy[(i-ii+3)][(j-jj)]=scv_21; C_copy[(i-ii+4)][(j-jj)]=scv_22; C_copy[(i-ii+6)][(j-jj)]=scv_23; C_copy[(i-ii+1)][(j-jj)]=scv_24; C_copy[(i-ii)][(j-jj)]=scv_25; C_copy[(i-ii+2)][(j-jj)]=scv_26; } } for (; i<=min(M-1,ii+127); i=i+1) { register int cbv_5; cbv_5=min(N-1,jj+63); #pragma ivdep #pragma vector always for (j=jj; j<=cbv_5; j=j+1) { double scv_27; scv_27=C_copy[(i-ii)][(j-jj)]; scv_27=scv_27+A_copy[(k-kk)][(i-ii)]*B[k][j]; C_copy[(i-ii)][(j-jj)]=scv_27; } } } for (i=ii; i<=min(M-1,ii+127); i=i+1) for (j=jj; j<=min(N-1,jj+63); j=j+1) C[i][j]=C_copy[(i-ii)][(j-jj)]; } } } } } } } annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; 
printf("%f\n", annot_t_total); return 1; }
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. } ..example.text:Make an addition atomic if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic i += 1; */ #ifdef _OPENMP #include <omp.h> #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // GCC _Pragma operator #define SEQAN_DO_PRAGMA(x) _Pragma(#x) #define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x) #else // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // MSVC __pragma-operator #define SEQAN_OMP_PRAGMA(x) __pragma (omp x) #endif // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) #else // #ifdef _OPENMP #define SEQAN_OMP_PRAGMA(x) #endif // #ifdef _OPENMP #endif // SEQAN_PARALLEL_PARALLEL_MACROS_H_
SKIM.h
/* Algorithm for Influence Estimation and Maximization

Copyright (c) Microsoft Corporation

All rights reserved.

MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the ""Software""), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#pragma once

#include <array>
#include <vector>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <random>
#include <climits>
#include <omp.h>

using namespace std;

#include "FastStaticGraphs.h"
#include "Macros.h"
#include "FastSet.h"
#include "HashPair.h"
#include "Timer.h"
#include "KHeap.h"

namespace Algorithms {
	namespace InfluenceMaximization {

		// SKIM: greedy influence maximization driven by (reverse-)reachability
		// sketches.  Each greedy iteration builds sketches by backward BFSes
		// from randomly permuted vertex/instance pairs until some vertex's
		// sketch reaches size k; that vertex becomes the next seed, and forward
		// BFSes then mark its exact coverage in every instance and shrink the
		// remaining sketches accordingly.
		class SKIM {
		public:
			// Type definitions.
			typedef DataStructures::Graphs::FastUnweightedGraph GraphType;
			typedef GraphType::ArcIdType ArcIdType;

			// Sentinel for "no vertex selected".
			static const uint32_t NullVertex = UINT_MAX;

			// Arc-containment probability models (see Contained<> specializations below).
			enum ModelType { WEIGHTED, BINARY, TRIVALENCY };

			// A seed vertex and associated data.
			struct SeedType {
				uint32_t VertexId = NullVertex;
				double EstimatedInfluence = 0.0;
				double ExactInfluence = 0.0;
				// Cumulative timings at the moment this seed was produced.
				double BuildSketchesElapsedMilliseconds = 0;
				double ComputeInfluenceElapsedMilliseconds = 0;
			};

			// Default constructor.  Precomputes per-vertex in-degrees (used by
			// the WEIGHTED containment model).
			SKIM(GraphType &g, const uint32_t s, const bool v) :
				verbose(v),
				randomSeed(s),
				graph(g),
				resolution(3000000),
				indeg(graph.NumVertices(), 0),
				binprob(resolution / 10),
				triprob{ { resolution / 10, resolution / 100, resolution / 1000 } }//,
				//dis(0, resolution-1),
				//gen(s)
			{
				if (verbose) cout << "Computing in-degrees... " << flush;

				// Compute the degrees.
				FORALL_ARCS(graph, vertexId, arc) {
					if (!arc->Forward()) continue;
					++indeg[arc->OtherVertexId()];
				}

				if (verbose) cout << "done." << endl;
			}

			// Set the binary probability (as a fraction of `resolution`).
			inline void SetBinaryProbability(const double prob) {
				binprob = uint32_t(prob * double(resolution));
			}

			// Run the greedy algorithm.
			//   N:    number of seeds to compute (0 = all vertices).
			//   k:    sketch-size threshold for seed selection.
			//   l:    number of simulated instances.
			//   lEval: if nonzero, re-evaluate exact influence on lEval fresh instances.
			//   numt: number of OpenMP threads (1 = sequential branch).
			template<ModelType modelType>
			inline void Run(uint32_t N, const uint16_t k, const uint16_t l, const uint16_t lEval, const int32_t numt, const string statsFilename = "", const string coverageFilename = "") {
				// Set N to number of vertices, if it's zero.
				if (N == 0) N = static_cast<uint32_t>(graph.NumVertices());

				/* Initialize the algorithm. */
				if (verbose) cout << "Setting up data structures... " << flush;

				// Some datastructures that are necessary for the algorithm.
				const uint64_t nl = graph.NumVertices()*l; // total number of vertex/instance ranks.
				vector<SeedType> seedSet; // this will hold the seed vertices.
				vector<uint32_t> permutation; // this is a permutation of the vertices to draw ranks from.
				unordered_map< pair<uint32_t, uint16_t>, vector<uint32_t> > invSketches; // these are the "inverse sketches" (search spaces).
				vector<uint16_t> sketchSizes(graph.NumVertices(), 0); // these are the sizes of the real sketches.
				vector<vector<bool>> covered(l); // this indicates whether a vertex/instance pair has been covered (influenced).
				vector<vector<bool>> processed(l); // this indicates whether a vertex/instance pair has been processed (sketches built from it).
				vector<DataStructures::Container::FastSet<uint32_t>> searchSpaces(numt); // this is for maintaining search spaces of BFSes; one per thread.
				DataStructures::Container::FastSet<uint32_t> &S0 = searchSpaces[0]; // alias used by the sequential paths.
				vector<vector<pair<uint32_t, uint16_t>>> updateQueues(numt); // per-thread queues of covered (vertex,instance) keys to apply after the parallel region.
				vector<vector<uint32_t>> buck; // buckets of vertices by sketch size (used after saturation).
				vector<uint32_t> buckind; // position of each vertex inside its bucket.
				uint16_t buckp(0); // index of the highest non-empty bucket.
				mt19937_64 rnd(randomSeed); // Random number generator.
				uniform_int_distribution<uint16_t> distr(0, l - 1);
				uint64_t rank(0); // this is the current rank value.
				Platform::Timer timer, globalTimer;
				double estinf(0), exinf(0), exinfloc(0), sketchms(0), infms(0);
				bool runParallel(numt > 1), saturated(false);
				uint32_t numperm(0), permthresh(l - (l / 10 + 1)); // switch instance-sampling strategy once few unprocessed instances remain.
				for (int32_t t = 0; t < numt; ++t)
					searchSpaces[t].Resize(graph.NumVertices());
				for (uint16_t i(0); i < l; ++i) {
					processed[i].resize(graph.NumVertices(), false);
					covered[i].resize(graph.NumVertices(), false);
				}
				if (verbose) cout << "done." << endl;

				/* Main iterations loop. Each iteration computes one seed vertex. */
				globalTimer.Start();
				while (seedSet.size() < N) {
					SeedType newSeed;
					exinfloc = 0.0;

					/* BFS computation to build sketches. */
					if (!saturated) {
						if (verbose) cout << "[" << seedSet.size() + 1 << "] Computing sketches from rank " << rank << "... " << flush;
						timer.Start();
						while (rank < nl) {
							// Select next vertex/instance pair.
							const Types::SizeType vi = rank % graph.NumVertices();
							// Draw a fresh vertex permutation at the start of each round.
							if (vi == 0) {
								if (permutation.size() != graph.NumVertices()) {
									permutation.resize(graph.NumVertices(), 0);
									for (uint32_t u(0); u < graph.NumVertices(); ++u)
										permutation[u] = u;
								}
								shuffle(permutation.begin(), permutation.end(), rnd);
								++numperm;
							}
							const uint32_t sourceVertexId = permutation[vi];
							// Pick an unprocessed instance for this vertex: rejection-sample
							// while many are free, else index into the unprocessed ones.
							uint16_t i = 0;
							if (numperm < permthresh) {
								do {
									i = distr(rnd);
								} while (processed[i][sourceVertexId]);
							}
							else {
								i = distr(rnd) % (l - numperm + 1);
								for (uint16_t j = 0; j < l; ++j) {
									if (!processed[j][sourceVertexId]) {
										if (i == 0) {
											i = j;
											break;
										}
										--i;
									}
								}
							}
							processed[i][sourceVertexId] = true;
							++rank; // Increase value for rank.

							// Shortcut to some variables.
							vector<bool> &cov = covered[i];
							vector<uint32_t> &invSketch = invSketches[make_pair(sourceVertexId, i)];

							// Only process such ranks that are not yet covered.
							if (cov[sourceVertexId]) continue;

							// Perform the BFS (backward: who can reach the source in this instance).
							S0.Clear();
							S0.Insert(sourceVertexId);
							uint32_t ind = 0;
							while (ind < S0.Size()) {
								uint32_t u = S0.KeyByIndex(ind++);
								++sketchSizes[u];
								invSketch.push_back(u);

								// pruning: first vertex whose sketch reaches k becomes the seed.
								if (sketchSizes[u] == k) {
									// Set the vertex and compute marginal influence.
									newSeed.VertexId = u;
									newSeed.EstimatedInfluence = static_cast<double>(k - 1) * static_cast<double>(graph.NumVertices()) / static_cast<double>(rank);
									break;
								}

								// arc expansion.
								FORALL_INCIDENT_ARCS_BACKWARD(graph, u, a) {
									if (!a->Backward()) break;
									const uint32_t v = a->OtherVertexId();
									if (Contained<modelType>(v, u, i, l) && !cov[v] && !S0.IsContained(v))
									//if (ContainedRandom<modelType>(v, u) && !cov[v] && !S0.IsContained(v))
										S0.Insert(v);
								}
							}
							if (newSeed.VertexId != NullVertex) break;
						} // end sketch building.

						sketchms += timer.LiveElapsedMilliseconds();
						newSeed.BuildSketchesElapsedMilliseconds = sketchms;
						if (verbose) cout << " done (u: " << newSeed.VertexId << ", est: " << newSeed.EstimatedInfluence << " r: " << rank << ", ms: " << newSeed.BuildSketchesElapsedMilliseconds << ")" << endl;

						// Out of new vertices... switch to exhaustive bucket-based selection.
						if (newSeed.VertexId == NullVertex) {
							if (verbose) cout << "GRAPH SATURATED (|S|=" << seedSet.size() << ", rank=" << rank << ")." << endl;
							if (verbose) cout << "Building buckets for the remaining vertices... " << flush;
							buck.resize(k);
							buckind.resize(graph.NumVertices(), 0);
							uint32_t num(0);
							FORALL_VERTICES(graph, u) {
								if (sketchSizes[u] > 0) {
									buckind[u] = uint32_t(buck[sketchSizes[u]].size());
									buck[sketchSizes[u]].push_back(u);
									buckp = max<uint16_t>(buckp, sketchSizes[u]);
									++num;
								}
							}
							if (verbose) cout << "done (" << num << " vertices)." << endl;
							saturated = true;
						}
					}

					if (saturated) {
						// Drop to the highest non-empty bucket.
						while (buckp > 0 && buck[buckp].empty()) --buckp;
						if (buckp == 0) {
							if (verbose) cout << endl << "TOTAL COVERAGE REACHED (|S|=" << seedSet.size() << ")." << endl;
							break;
						}
						// Select the next seed vertex as the one that has the highest number of things in the sketch.
						if (verbose) cout << "[" << seedSet.size() + 1 << "] Determining the vertex that has highest marginal influence... " << flush;
						Assert(!buck[buckp].empty());
						newSeed.VertexId = buck[buckp].back();
						newSeed.EstimatedInfluence = double(sketchSizes[newSeed.VertexId]) / l;
						newSeed.BuildSketchesElapsedMilliseconds = sketchms;
						if (verbose) cout << " done (u: " << newSeed.VertexId << ", est: " << newSeed.EstimatedInfluence << ")" << endl;
					}

					/* BFS computation on each instance to get the exact influence. Also updates the sketch sizes. */
					if (verbose) cout << "[" << seedSet.size() + 1 << "] Computing influence... " << flush;
					timer.Start();

					// Call sequential or parallel BFS to compute influences.
					if (runParallel) {
#pragma omp parallel num_threads(numt) reduction(+ : exinfloc)
						{
							// Get thread id.
							const int32_t t = omp_get_thread_num();

							// Shortcut to thread-local search spaces.
							auto &S = searchSpaces[t];
							auto &Q = updateQueues[t];
							Q.clear();

#pragma omp for
							for (int32_t i = 0; i < l; ++i) {
								// Shortcut to some variables.
								vector<bool> &cov = covered[i];

								// Run a BFS (forward, from the new seed).
								S.Clear();
								if (!cov[newSeed.VertexId]) S.Insert(newSeed.VertexId);
								uint32_t ind = 0;
								while (ind < S.Size()) {
									uint32_t u = S.KeyByIndex(ind++);
									cov[u] = true;
									++exinfloc;

									// Update counters and sketches.
									// Sketch updates are only queued here; they are applied
									// sequentially after the parallel region.
									const pair<uint32_t, uint16_t> key(u, i);
									if (invSketches.count(key)) {
										Q.push_back(key);
									}
									FORALL_INCIDENT_ARCS(graph, u, a) {
										if (!a->Forward()) break;
										const uint32_t v = a->OtherVertexId();
										if (Contained<modelType>(u, v, i, l) && !S.IsContained(v) && !cov[v])
											S.Insert(v);
									}
								}
							} // end exact influence computation.
						} // end parallel section.

						// Update the counters (apply queued sketch shrinkages sequentially).
						for (int32_t t = 0; t < numt; ++t) {
							vector<pair<uint32_t, uint16_t>> &Q = updateQueues[t];
							for (const pair<uint32_t, uint16_t> &key : Q) {
								const vector<uint32_t> &invSketch = invSketches[key];
								if (!saturated) {
									for (const uint32_t &v : invSketch)
										--sketchSizes[v];
								}
								else {
									for (const uint32_t &v : invSketch) {
										uint16_t s = sketchSizes[v];
										// Erase from bucket (swap-with-back), then move v one bucket down.
										buckind[buck[s].back()] = buckind[v];
										swap(buck[s][buckind[v]], buck[s].back());
										buck[s].pop_back();
										if (sketchSizes[v] > 1) {
											buckind[v] = uint32_t(buck[sketchSizes[v] - 1].size());
											buck[sketchSizes[v] - 1].push_back(v);
										}
										--sketchSizes[v];
									}
								}
								invSketches.erase(key);
							}
						}
					} // end parallel branch.
					else { // begin sequential branch.
						for (int32_t i = 0; i < l; ++i) {
							// Shortcut to some variables.
							vector<bool> &cov = covered[i];

							// Run a BFS (forward, from the new seed).
							S0.Clear();
							if (!cov[newSeed.VertexId]) S0.Insert(newSeed.VertexId);
							uint32_t ind = 0;
							while (ind < S0.Size()) {
								uint32_t u = S0.KeyByIndex(ind++);
								cov[u] = true;
								++exinfloc;

								// Update counters and sketches (applied inline in this branch).
								const pair<uint32_t, uint16_t> key(u, i);
								if (invSketches.count(key)) {
									const vector<uint32_t> &invSketch = invSketches[key];
									if (!saturated) {
										for (const uint32_t &v : invSketch)
											--sketchSizes[v];
									}
									else {
										for (const uint32_t &v : invSketch) {
											uint16_t s = sketchSizes[v];
											// Erase from bucket (swap-with-back), then move v one bucket down.
											buckind[buck[s].back()] = buckind[v];
											swap(buck[s][buckind[v]], buck[s].back());
											buck[s].pop_back();
											if (sketchSizes[v] > 1) {
												buckind[v] = uint32_t(buck[sketchSizes[v] - 1].size());
												buck[sketchSizes[v] - 1].push_back(v);
											}
											--sketchSizes[v];
										}
									}
									invSketches.erase(key);
								}
								FORALL_INCIDENT_ARCS(graph, u, a) {
									if (!a->Forward()) break;
									const uint32_t v = a->OtherVertexId();
									if (Contained<modelType>(u, v, i, l) && !S0.IsContained(v) && !cov[v])
										S0.Insert(v);
								}
							}
						} // end exact influence computation.
					} // end sequential branch.

					newSeed.ExactInfluence = double(exinfloc) / double(l);
					infms += timer.LiveElapsedMilliseconds();
					newSeed.ComputeInfluenceElapsedMilliseconds = infms;
					estinf += newSeed.EstimatedInfluence;
					exinf += newSeed.ExactInfluence;
					seedSet.push_back(newSeed);
					if (verbose) cout << " done (inf: " << newSeed.ExactInfluence << ", ms: " << newSeed.ComputeInfluenceElapsedMilliseconds << ")." << endl;
					if (verbose) cout << endl;
				} // end greedy iteration.

				const double totalms = globalTimer.LiveElapsedMilliseconds();

				// Compute the exact influence? This is not measured in the running time.
				if (lEval != 0)
					exinf = ComputeExactInfluence<modelType>(seedSet, lEval);

				/* Print results. */
				if (verbose) cout << endl;
				graph.DumpStatistics(cout);
				cout << "Random seed: " << randomSeed << "." << endl
					<< "Number of seed vertices computed: " << seedSet.size() << "." << endl
					<< "Number of ranks used: " << rank << "." << endl
					<< "Permutations computed: " << numperm << " (each of size: " << permutation.size() << ")." << endl
					<< "Building sketches: " << sketchms / 1000.0 << " sec." << endl
					<< "Computing influence: " << infms / 1000.0 << " sec." << endl
					<< "Total time: " << totalms / 1000.0 << " sec." << endl
					<< "Estimated spread of solution: " << estinf << " (" << (100.0*estinf / static_cast<double>(graph.NumVertices())) << " %)." << endl
					<< "Exact spread of solution: " << exinf << " (" << (100.0*exinf / static_cast<double>(graph.NumVertices())) << " %)." << endl
					<< "Quality gap: " << 100.0 * (1.0 - exinf / estinf) << " %" << endl;

				/* Dump statistics to a file. */
				if (!statsFilename.empty()) {
					IO::FileStream file;
					file.OpenNewForWriting(statsFilename);
					if (file.IsOpen()) {
						stringstream ss;
						ss << "NumberOfVertices = " << graph.NumVertices() << endl
							<< "NumberOfArcs = " << graph.NumArcs() / 2 << endl
							<< "TotalEstimatedInfluence = " << estinf << endl
							<< "TotalExactInfluence = " << exinf << endl
							<< "TotalElapsedMilliseconds = " << totalms << endl
							<< "SketchBuildingElapsedMilliseconds = " << sketchms << endl
							<< "InfluenceComputationElapsedMilliseconds = " << infms << endl
							<< "NumberOfRanksUsed = " << rank << endl
							<< "NumberOfSeedVertices = " << seedSet.size() << endl
							<< "RankComputationMethod = " << "shuffle" << endl
							<< "NumberOfPermutationsComputed = " << numperm << endl;
						double sumEstimatedInfluence(0.0), sumExactInfluence(0.0);
						for (Types::IndexType i = 0; i < seedSet.size(); ++i) {
							sumEstimatedInfluence += seedSet[i].EstimatedInfluence;
							sumExactInfluence += seedSet[i].ExactInfluence;
							ss << i << "_MarginalEstimatedInfluence = " << seedSet[i].EstimatedInfluence << endl
								<< i << "_CumulativeEstimatedInfluence = " << sumEstimatedInfluence << endl
								<< i << "_MarginalExactInfluence = " << seedSet[i].ExactInfluence << endl
								<< i << "_CumulativeExactInfluence = " << sumExactInfluence << endl
								<< i << "_VertexId = " << seedSet[i].VertexId << endl
								<< i << "_TotalElapsedMilliseconds = " << seedSet[i].BuildSketchesElapsedMilliseconds + seedSet[i].ComputeInfluenceElapsedMilliseconds << endl
								<< i << "_SketchBuildingElapsedMilliseconds = " << seedSet[i].BuildSketchesElapsedMilliseconds << endl
								<< i << "_InfluenceComputationElapsedMilliseconds = " << seedSet[i].ComputeInfluenceElapsedMilliseconds << endl;
						}
						file.WriteString(ss.str());
					}
				}

				// Dump per-seed cumulative coverage (vertex id, cumulative influence, elapsed ms).
				if (!coverageFilename.empty()) {
					IO::FileStream file;
					file.OpenNewForWriting(coverageFilename);
					if (file.IsOpen()) {
						stringstream ss;
						ss << graph.NumVertices() << endl;
						ss << seedSet.size() << endl;
						ss << seedSet.back().ComputeInfluenceElapsedMilliseconds + seedSet.back().BuildSketchesElapsedMilliseconds << endl;
						double sumExactInfluence(0.0);
						double elapsedMilliseconds(0.0);
						for (Types::IndexType i = 0; i < seedSet.size(); ++i) {
							sumExactInfluence += seedSet[i].ExactInfluence;
							elapsedMilliseconds = seedSet[i].BuildSketchesElapsedMilliseconds + seedSet[i].ComputeInfluenceElapsedMilliseconds;
							ss << seedSet[i].VertexId << "\t" << sumExactInfluence << "\t" << elapsedMilliseconds << endl;
						}
						file.WriteString(ss.str());
					}
				}
			}

		protected:

			// This evaluates the influence using a separate BFS with a separate seed.
			template<ModelType modelType>
			inline double ComputeExactInfluence(vector<SeedType> &seedSet, const uint16_t l) {
				// This essentially runs a bunch of BFSes in all l instances, one from each
				// seed vertex. It then updates the exact influence value for the respective
				// seed.
				if (verbose) cout << "Allocating data structures... " << flush;
				DataStructures::Container::FastSet<uint32_t> searchSpace(graph.NumVertices());
				vector<vector<bool>> marked(l);
				for (uint16_t i = 0; i < l; ++i)
					marked[i].resize(graph.NumVertices(), false);
				if (verbose) cout << "done." << endl;

				// For each seed vertex, perform a BFS in every instance, and count the search space sizes.
				if (verbose) cout << "Running BFSes to compute exact influence in " << l << " instances and " << seedSet.size() << " vertices:" << flush;
				double exinf(0);
				for (SeedType &s : seedSet) {
					uint64_t size = 0;
					for (uint16_t i = 0; i < l; ++i) {
						vector<bool> &m = marked[i];
						if (m[s.VertexId]) continue;
						searchSpace.Clear();
						searchSpace.Insert(s.VertexId);
						uint64_t cur = 0;
						while (cur < searchSpace.Size()) {
							const uint32_t u = searchSpace.KeyByIndex(cur++);
							m[u] = true;
							++size;
							FORALL_INCIDENT_ARCS(graph, u, arc) {
								if (!arc->Forward()) continue;
								const uint32_t v = arc->OtherVertexId();
								if (Contained<modelType>(u, v, i, l) && !m[v] && !searchSpace.IsContained(v))
									searchSpace.Insert(v);
							}
						}
					}
					// Marginal influence of this seed = newly marked vertices averaged over instances.
					s.ExactInfluence = double(size) / double(l);
					exinf += s.ExactInfluence;
					if (verbose) cout << " " << s.ExactInfluence << flush;
				}
				if (verbose) cout << endl << "done (exinf=" << exinf << ")." << endl;
				return exinf;
			}

		protected:

			// Returns true if the (forward) arc from u to v is contained in instance i.
			// Primary template is never called; only the specializations below are valid.
			template<ModelType modelType>
			inline bool Contained(const uint32_t u, const uint32_t v, const uint16_t i, const uint16_t l) {
				assert(false);
				return false;
			}

			//// Returns true if the (forward) arc from u to v is contained in instance i.
			//template<ModelType modelType>
			//inline bool ContainedRandom(const uint32_t u, const uint32_t v) {
			//	assert(false);
			//	return false;
			//}

			// A tailored Murmur hash 3 function for pair of vertices and instance.
			// NOTE(review): _rotl is an MSVC intrinsic, not standard C++ — confirm
			// an equivalent rotate is provided when building with GCC/Clang.
			inline uint32_t Murmur3Hash(const uint32_t u, const uint32_t v, const uint16_t i, const uint16_t l) const {
				// Seed with our seed value.
				uint32_t h = (randomSeed << 16) + l;

				// Declare magic constants c1 and c2.
				const uint32_t c1 = 0xcc9e2d51;
				const uint32_t c2 = 0x1b873593;

				// Hash the first vertex.
				uint32_t k = u;
				k *= c1;
				k = _rotl(k, 15);
				k *= c2;
				h ^= k;
				h = _rotl(h, 13);
				h = h * 5 + 0xe6546b64;

				// Hash the second vertex.
				k = v;
				k *= c1;
				k = _rotl(k, 15);
				k *= c2;
				h ^= k;
				h = _rotl(h, 13);
				h = h * 5 + 0xe6546b64;

				// Hash the instance.
				k = static_cast<uint32_t>(i);
				k *= c1;
				k = _rotl(k, 15);
				k *= c2;
				h ^= k;

				// Mix the result.
				h ^= 10; // length of input in bytes.
				h ^= h >> 16;
				h *= 0x85ebca6b;
				h ^= h >> 13;
				h *= 0xc2b2ae35;
				h ^= h >> 16;
				return h;
			}

		private:

			// Indicates whether the algorithm produces output.
			bool verbose = true;

			// This is the random seed.
			uint32_t randomSeed;

			// This is the graph we are using
			GraphType &graph;

			// The resolution for integer probabilities using the hash function.
			const uint32_t resolution;

			// The degrees of each vertex.
			vector<ArcIdType> indeg;

			// The binary probability.
			uint32_t binprob;

			// The trivalency probabilities.
			const array<uint32_t, 3> triprob;

			// Random distribution.
			//uniform_int_distribution<uint32_t> dis;

			// Random number generator.
			//mt19937 gen;
		};

		// WEIGHTED model: arc probability inversely proportional to the head's in-degree.
		template<>
		inline bool SKIM::Contained<SKIM::WEIGHTED>(const uint32_t u, const uint32_t v, const uint16_t i, const uint16_t l) {
			uint32_t prob = min(resolution, resolution / indeg[v]);
			return (Murmur3Hash(u, v, i, l) % resolution) < prob;
		}

		// BINARY model: fixed arc probability binprob/resolution.
		template<>
		inline bool SKIM::Contained<SKIM::BINARY>(const uint32_t u, const uint32_t v, const uint16_t i, const uint16_t l) {
			return (Murmur3Hash(u, v, i, l) % resolution) < binprob;
		}

		// TRIVALENCY model: arc probability hashed to one of three fixed values.
		template<>
		inline bool SKIM::Contained<SKIM::TRIVALENCY>(const uint32_t u, const uint32_t v, const uint16_t i, const uint16_t l) {
			const uint32_t index = (Murmur3Hash(u, v, i, l) % triprob.size());
			return (Murmur3Hash(u, v, i, l) % resolution) < triprob[index];
		}
	}
}
GB_binop__pow_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__pow_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__pow_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fp32) // C=scalar+B GB (_bind1st__pow_fp32) // C=scalar+B' GB (_bind1st_tran__pow_fp32) // C=A+scalar GB (_bind2nd__pow_fp32) // C=A'+scalar GB (_bind2nd_tran__pow_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = GB_powf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_powf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FP32 || GxB_NO_POW_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) 
C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix 
M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = GB_powf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = GB_powf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_powf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_powf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ccode_base.h
// Ad-hoc 3-motif counting, baseline version (no connectivity codes).
// One counter vector per thread in global_counters: counter[0] accumulates
// wedges (2-paths), counter[1] accumulates triangles. `ccodes` is unused in
// this variant (see the commented-out lines); it is kept so the signature
// matches ccode_3motif. difference_num/intersection_num are set-operation
// helpers defined elsewhere — presumably the trailing vertex argument is an
// upper bound on the elements considered (TODO confirm against their defs).
void base_3motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters, std::vector<std::vector<uint8_t>> &ccodes) {
  #pragma omp parallel for schedule(dynamic,1)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    auto tid = omp_get_thread_num();
    auto &counter = global_counters.at(tid);
    //auto &local_ccodes = ccodes[tid];
    //update_ccodes(0, g, v0, local_ccodes);
    VertexSet y0 = g.N(v0);
    for (auto v1 : y0) {
      VertexSet y1 = g.N(v1);
      // neighbors of v0 that are not neighbors of v1 (bounded by v1): wedges
      counter[0] += difference_num(y0, y1, v1);
      if (v1 < v0) {
        // common neighbors of v0 and v1 (bounded by v1): each triangle
        // counted once via the v2 < v1 < v0 ordering
        counter[1] += intersection_num(y0, y1, v1);
      }
    }
  }
}

// 3-motif counting using per-thread connectivity codes (ccodes).
// local_ccodes[u] == 1 marks u as a neighbor of v0 (only for u < v0);
// marks are set before the main loop and cleared afterwards so the
// scratch array can be reused across v0 iterations.
// NOTE(review): relies on adjacency lists being sorted ascending, hence the
// early `break` on ordering tests — TODO confirm Graph guarantees this.
void ccode_3motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters, std::vector<std::vector<uint8_t>> &ccodes) {
  #pragma omp parallel for schedule(dynamic,1)
  for (vidType v0 = 0; v0 < g.V(); v0++) {
    auto tid = omp_get_thread_num();
    auto &counter = global_counters.at(tid);
    auto &local_ccodes = ccodes[tid];
    VertexSet y0 = g.N(v0);
    uint64_t local_counter_0 = 0;
    uint64_t local_counter_1 = 0;
    // mark neighbors of v0 below v0
    for (auto u : g.N(v0)) {
      if (u > v0) break;
      local_ccodes[u] = 1;
    }
    for (auto v1 : y0) {
      VertexSet y1 = g.N(v1);
      for (auto v2 : y1) {
        if (v2 >= v0) break;
        // v2 adjacent to v1 but not to v0: wedge centered at v1
        if (local_ccodes[v2] == 0) local_counter_0 ++;
      }
      if (v1 < v0) {
        for (auto v2 : y1) {
          if (v2 > v1) break;
          // v2 adjacent to both v0 and v1, v2 < v1 < v0: triangle
          if (local_ccodes[v2] == 1) local_counter_1 ++;
        }
      }
    }
    // clear the marks for the next v0 handled by this thread
    for (auto u : g.N(v0)) {
      if (u > v0) break;
      local_ccodes[u] = 0;
    }
    counter[0] += local_counter_0;
    counter[1] += local_counter_1;
  }
}

// Ad-hoc 4-motif counting, baseline (set-operation) version.
// Per-thread counter slots: [0] 3-star, [1] 4-path, [2] tailed-triangle,
// [3] 4-cycle, [4] diamond, [5] 4-clique. `ccodes` is unused here.
void base_4motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters, std::vector<std::vector<uint8_t>> &ccodes) {
  std::cout << "Ad-hoc 4-motif counting\n";
  #pragma omp parallel
  {
    auto &counter = global_counters.at(omp_get_thread_num());
    #pragma omp for schedule(dynamic,1)
    for (vidType v0 = 0; v0 < g.V(); v0++) {
      VertexSet y0 = g.N(v0);
      for (auto v1 : y0) {
        VertexSet y1 = g.N(v1);
        // y0n1f1: neighbors of v0 that are not neighbors of v1, below v1
        VertexSet y0n1f1 = difference_set(y0, y1, v1);
        for (vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) {
          vidType v2 = y0n1f1.begin()[idx2];
          VertexSet y2 = g.N(v2);
          counter[0] += difference_num(y0n1f1, y2, v2); // 3-star
        }
      }
      for (auto v1 : y0) {
        if (v1 > v0) break;
        VertexSet y1 = g.N(v1);
        // y0y1: common neighbors of the edge (v0, v1)
        VertexSet y0y1 = intersection_set(y0, y1);
        for (auto v2 : y0y1) {
          VertexSet y2 = g.N(v2);
          counter[4] += difference_num(y0y1, y2, v2); // diamond
          VertexSet n0n1y2; difference_set(n0n1y2, y2, y0);
          counter[2] += difference_num(n0n1y2, y1); // tailed-triangle
          if (v2 > v1) continue;
          counter[5] += intersection_num(y0y1, y2, v2); // 4-clique
        }
        VertexSet n0y1; difference_set(n0y1, y1, y0);
        for (auto v2 : difference_set(y0, y1)) {
          VertexSet y2 = g.N(v2);
          counter[1] += difference_num(n0y1, y2); // 4-path
        }
        //VertexSet n0f0y1; difference_set(n0f0y1, y1, y0);
        for (auto v2 : difference_set(y0, y1, v1)) {
          VertexSet y2 = g.N(v2);
          counter[3] += intersection_num(n0y1, y2, v0); // 4-cycle
        }
      }
    }
  }
}

// 4-motif counting using per-thread connectivity codes.
// update_ccodes(level, ...) / resume_ccodes(level, ...) are defined
// elsewhere; from the tests below (== 3, == 7, & 3) they appear to set/clear
// bit `level` of local_ccodes[u] for neighbors u of the given vertex, so
// e.g. code 3 = adjacent to both v0 and v1, code 7 = adjacent to v0, v1 and
// v2 — TODO confirm against update_ccodes' definition.
void ccode_4motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters, std::vector<std::vector<uint8_t>> &ccodes) {
  std::cout << "Ad-hoc 4-motif counting\n";
  #pragma omp parallel
  {
    auto tid = omp_get_thread_num();
    auto &counter = global_counters.at(tid);
    auto &local_ccodes = ccodes[tid];
    #pragma omp for schedule(dynamic,1)
    for(vidType v0 = 0; v0 < g.V(); v0++) {
      VertexSet y0 = g.N(v0);
      update_ccodes(0, g, v0, local_ccodes); // set bit 0 for N(v0)
      for (auto v1 : y0) {
        VertexSet y1 = g.N(v1);
        VertexSet y0n1f1 = difference_set(y0, y1, v1);
        for(vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) {
          vidType v2 = y0n1f1.begin()[idx2];
          VertexSet y2 = g.N(v2);
          counter[0] += difference_num(y0n1f1, y2, v2); // 3-star
        }
      }
      for (auto v1 : y0) {
        if (v1 > v0) break;
        update_ccodes(1, g, v1, local_ccodes); // set bit 1 for N(v1)
        VertexSet y1 = g.N(v1);
        VertexSet y0y1 = intersection_set(y0, y1);
        for (auto v2 : y0y1) {
          update_ccodes(2, g, v2, local_ccodes, v2); // set bit 2 for N(v2) below v2
          for (auto v3 : y0y1) {
            // v3 in N(v0) ∩ N(v1) but not N(v2): diamond
            if (v3 < v2 && local_ccodes[v3] == 3) counter[4] ++; // diamond
          }
          VertexSet y2 = g.N(v2);
          for (auto v3 : y2) {
            // v3 adjacent to v2 only (not v0/v1): tail of the triangle
            if((local_ccodes[v3] & 3) == 0 && v3 != v0 && v3 != v1) counter[2] ++; // tailed-triangle
          }
          if (v2 < v1) {
            for (auto v3 : y0y1) {
              if (v3 > v2) break;
              if (local_ccodes[v3] == 7) counter[5] ++; // 4-clique
            }
          }
          resume_ccodes(2, g, v2, local_ccodes, v2); // clear bit 2
        }
        VertexSet n0y1; difference_set(n0y1, y1, y0);
        for (auto v2 : y0) {
          // code 1 ⇒ v2 adjacent to v0 but not v1
          if (local_ccodes[v2] == 1) {
            VertexSet y2 = g.N(v2);
            counter[1] += difference_num(n0y1, y2); // 4-path
          }
        }
        //VertexSet n0f0y1; difference_set(n0f0y1, y1, y0);
        //for (auto v2 : difference_set(y0, y1, v1)) {
        for (auto v2 : y0) {
          if (v2 < v1 && local_ccodes[v2] == 1) {
            VertexSet y2 = g.N(v2);
            counter[3] += intersection_num(n0y1, y2, v0); // 4-cycle
          }
        }
        resume_ccodes(1, g, v1, local_ccodes); // clear bit 1
      }
      resume_ccodes(0, g, v0, local_ccodes); // clear bit 0
    }
  }
}

// Dispatch: k-motif counting for k = 3 or 4; other k is a no-op.
// Compile-time switch USE_CMAP selects the connectivity-code variants.
// Note the trailing `else return;` binds to the `if / else if` chain of
// whichever preprocessor branch was compiled — legal, if unusual, C++.
void ccode_kmotif(Graph &g, unsigned k, std::vector<std::vector<uint64_t>> &counters, std::vector<std::vector<uint8_t>> &ccodes) {
#ifdef USE_CMAP
  if (k == 3) ccode_3motif(g, counters, ccodes);
  else if (k == 4) ccode_4motif(g, counters, ccodes);
#else
  if (k == 3) base_3motif(g, counters, ccodes);
  else if (k == 4) base_4motif(g, counters, ccodes);
#endif
  else return;
}
GB_unop__identity_int64_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_int64_fp32
// op(A') function: GB_unop_tran__identity_int64_fp32

// C type: int64_t
// A type: float
// cast: int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop: cij = aij

// The macros below parameterize the generic templates included further down
// (GB_unop_transpose.c) for this specific type pair.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: float is widened to double before the saturating cast to int64_t
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting.
// 0 here: the op is identity but float->int64 typecasting is required, so
// the GB_memcpy fast path below is compiled out.
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over anz entries.  Cx and Ax may be aliased.  If A is
// bitmap, Ab selects which entries exist; entries with Ab[p]==0 are skipped
// (their Cx slots are left untouched — C->b has already been copied from
// A->b by the caller).

GrB_Info GB_unop_apply__identity_int64_fp32
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the included template, driven by the GB_*
// macros defined above.

GrB_Info GB_unop_tran__identity_int64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
zbuffer.c
/* * Z buffer: 16 bits Z / 16 bits color * */ #include <stdlib.h> #include <string.h> #include "../include/zbuffer.h" #include "msghandling.h" ZBuffer* ZB_open(GLint xsize, GLint ysize, GLint mode, void* frame_buffer) { ZBuffer* zb; GLint size; zb = gl_malloc(sizeof(ZBuffer)); if (zb == NULL) return NULL; zb->xsize = xsize & ~3; zb->ysize = ysize; zb->linesize = (xsize * PSZB); switch (mode) { #if TGL_FEATURE_32_BITS == 1 case ZB_MODE_RGBA: break; #endif #if TGL_FEATURE_16_BITS == 1 case ZB_MODE_5R6G5B: break; #endif default: goto error; } size = zb->xsize * zb->ysize * sizeof(GLushort); zb->zbuf = gl_malloc(size); if (zb->zbuf == NULL) goto error; if (frame_buffer == NULL) { zb->pbuf = gl_malloc(zb->ysize * zb->linesize); if (zb->pbuf == NULL) { gl_free(zb->zbuf); goto error; } zb->frame_buffer_allocated = 1; } else { zb->frame_buffer_allocated = 0; zb->pbuf = frame_buffer; } zb->current_texture = NULL; return zb; error: gl_free(zb); return NULL; } void ZB_close(ZBuffer* zb) { if (zb->frame_buffer_allocated) gl_free(zb->pbuf); gl_free(zb->zbuf); gl_free(zb); } void ZB_resize(ZBuffer* zb, void* frame_buffer, GLint xsize, GLint ysize) { GLint size; /* xsize must be a multiple of 4 */ xsize = xsize & ~3; zb->xsize = xsize; zb->ysize = ysize; zb->linesize = (xsize * PSZB); size = zb->xsize * zb->ysize * sizeof(GLushort); gl_free(zb->zbuf); zb->zbuf = gl_malloc(size); if (zb->zbuf == NULL) exit(1); if (zb->frame_buffer_allocated) gl_free(zb->pbuf); if (frame_buffer == NULL) { zb->pbuf = gl_malloc(zb->ysize * zb->linesize); if (!zb->pbuf) exit(1); zb->frame_buffer_allocated = 1; } else { zb->pbuf = frame_buffer; zb->frame_buffer_allocated = 0; } } #if TGL_FEATURE_32_BITS == 1 PIXEL pxReverse32(PIXEL x) { return ((x & 0xFF000000) >> 24) | /*______AA*/ ((x & 0x00FF0000) >> 8) | /*____RR__*/ ((x & 0x0000FF00) << 8) | /*__GG____*/ ((x & 0x000000FF) << 24); /* BB______*/ } #endif static void ZB_copyBuffer(ZBuffer* zb, void* buf, GLint linesize) { GLint y, i; #if 
TGL_FEATURE_MULTITHREADED_ZB_COPYBUFFER == 1 #pragma omp parallel for for (y = 0; y < zb->ysize; y++) { PIXEL* q; GLubyte* p1; q = zb->pbuf + y * zb->xsize; p1 = (GLubyte*)buf + y * linesize; #if TGL_FEATURE_NO_COPY_COLOR == 1 for (i = 0; i < zb->xsize; i++) { if ((*(q + i) & TGL_COLOR_MASK) != TGL_NO_COPY_COLOR) *(((PIXEL*)p1) + i) = *(q + i); } #else memcpy(p1, q, linesize); #endif } #else for (y = 0; y < zb->ysize; y++) { PIXEL* q; GLubyte* p1; q = zb->pbuf + y * zb->xsize; p1 = (GLubyte*)buf + y * linesize; #if TGL_FEATURE_NO_COPY_COLOR == 1 for (i = 0; i < zb->xsize; i++) { if ((*(q + i) & TGL_COLOR_MASK) != TGL_NO_COPY_COLOR) *(((PIXEL*)p1) + i) = *(q + i); } #else memcpy(p1, q, linesize); #endif } #endif } #if TGL_FEATURE_RENDER_BITS == 16 /* 32 bpp copy */ /* #ifdef TGL_FEATURE_32_BITS #define RGB16_TO_RGB32(p0,p1,v)\ {\ GLuint g,b,gb;\ g = (v & 0x07E007E0) << 5;\ b = (v & 0x001F001F) << 3;\ gb = g | b;\ p0 = (gb & 0x0000FFFF) | ((v & 0x0000F800) << 8);\ p1 = (gb >> 16) | ((v & 0xF8000000) >> 8);\ } static void ZB_copyFrameBufferRGB32(ZBuffer * zb, void *buf, GLint linesize) { GLushort *q; GLuint *p, *p1, v, w0, w1; GLint y, n; q = zb->pbuf; p1 = (GLuint *) buf; for (y = 0; y < zb->ysize; y++) { p = p1; n = zb->xsize >> 2; do { v = *(GLuint *) q; RGB16_TO_RGB32(w1, w0, v); p[0] = w0; p[1] = w1; v = *(GLuint *) (q + 2); RGB16_TO_RGB32(w1, w0, v); p[2] = w0; p[3] = 0; q += 4; p += 4; } while (--n > 0); p1 += linesize; } } */ #endif /* 24 bit packed pixel handling */ #ifdef TGL_FEATURE_24_BITS /* order: RGBR GBRG BRGB */ /* XXX: packed pixel 24 bit support not tested */ /* XXX: big endian case not optimised */ /* #if BYTE_ORDER == BIG_ENDIAN #define RGB16_TO_RGB24(p0,p1,p2,v1,v2)\ {\ GLuint r1,g1,b1,gb1,g2,b2,gb2;\ v1 = (v1 << 16) | (v1 >> 16);\ v2 = (v2 << 16) | (v2 >> 16);\ r1 = (v1 & 0xF800F800);\ g1 = (v1 & 0x07E007E0) << 5;\ b1 = (v1 & 0x001F001F) << 3;\ gb1 = g1 | b1;\ p0 = ((gb1 & 0x0000FFFF) << 8) | (r1 << 16) | (r1 >> 24);\ g2 = (v2 & 0x07E007E0) << 
5;\ b2 = (v2 & 0x001F001F) << 3;\ gb2 = g2 | b2;\ p1 = (gb1 & 0xFFFF0000) | (v2 & 0xF800) | ((gb2 >> 8) & 0xff);\ p2 = (gb2 << 24) | ((v2 & 0xF8000000) >> 8) | (gb2 >> 16);\ } #else #define RGB16_TO_RGB24(p0,p1,p2,v1,v2)\ {\ GLuint r1,g1,b1,gb1,g2,b2,gb2;\ r1 = (v1 & 0xF800F800);\ g1 = (v1 & 0x07E007E0) << 5;\ b1 = (v1 & 0x001F001F) << 3;\ gb1 = g1 | b1;\ p0 = ((gb1 & 0x0000FFFF) << 8) | (r1 << 16) | (r1 >> 24);\ g2 = (v2 & 0x07E007E0) << 5;\ b2 = (v2 & 0x001F001F) << 3;\ gb2 = g2 | b2;\ p1 = (gb1 & 0xFFFF0000) | (v2 & 0xF800) | ((gb2 >> 8) & 0xff);\ p2 = (gb2 << 24) | ((v2 & 0xF8000000) >> 8) | (gb2 >> 16);\ } #endif */ /* static void ZB_copyFrameBufferRGB24(ZBuffer * zb, void *buf, GLint linesize) { GLushort *q; GLuint *p, *p1, w0, w1, w2, v0, v1; GLint y, n; q = zb->pbuf; p1 = (GLuint *) buf; linesize = linesize * 3; for (y = 0; y < zb->ysize; y++) { p = p1; n = zb->xsize >> 2; do { v0 = *(GLuint *) q; v1 = *(GLuint *) (q + 2); RGB16_TO_RGB24(w0, w1, w2, v0, v1); p[0] = w0; p[1] = w1; p[2] = w2; q += 4; p += 3; } while (--n > 0); *((GLbyte *) p1) += linesize; } } */ #endif #if TGL_FEATURE_RENDER_BITS == 16 void ZB_copyFrameBuffer(ZBuffer* zb, void* buf, GLint linesize) { ZB_copyBuffer(zb, buf, linesize); } #endif /*^ TGL_FEATURE_RENDER_BITS == 16 */ #if TGL_FEATURE_RENDER_BITS == 32 #define RGB32_TO_RGB16(v) (((v >> 8) & 0xf800) | (((v) >> 5) & 0x07e0) | (((v)&0xff) >> 3)) void ZB_copyFrameBuffer(ZBuffer* zb, void* buf, GLint linesize) { ZB_copyBuffer(zb, buf, linesize); } #endif /* ^TGL_FEATURE_RENDER_BITS == 32 */ /* * adr must be aligned on an 'int' */ static void memset_s(void* adr, GLint val, GLint count) { GLint i, n, v; GLuint* p; GLushort* q; p = adr; v = val | (val << 16); n = count >> 3; for (i = 0; i < n; i++) { p[0] = v; p[1] = v; p[2] = v; p[3] = v; p += 4; } q = (GLushort*)p; n = count & 7; for (i = 0; i < n; i++) *q++ = val; } /* Used in 32 bit mode*/ static void memset_l(void* adr, GLint val, GLint count) { GLint i, n, v; GLuint* p; p = adr; v = 
val; n = count >> 2; for (i = 0; i < n; i++) { p[0] = v; p[1] = v; p[2] = v; p[3] = v; p += 4; } n = count & 3; for (i = 0; i < n; i++) *p++ = val; } void ZB_clear(ZBuffer* zb, GLint clear_z, GLint z, GLint clear_color, GLint r, GLint g, GLint b) { GLuint color; GLint y; PIXEL* pp; if (clear_z) { memset_s(zb->zbuf, z, zb->xsize * zb->ysize); } if (clear_color) { pp = zb->pbuf; for (y = 0; y < zb->ysize; y++) { #if TGL_FEATURE_RENDER_BITS == 15 || TGL_FEATURE_RENDER_BITS == 16 // color = RGB_TO_PIXEL(r, g, b); #if TGL_FEATURE_FORCE_CLEAR_NO_COPY_COLOR color = TGL_NO_COPY_COLOR; #else color = RGB_TO_PIXEL(r, g, b); #endif memset_s(pp, color, zb->xsize); #elif TGL_FEATURE_RENDER_BITS == 32 #if TGL_FEATURE_FORCE_CLEAR_NO_COPY_COLOR color = TGL_NO_COPY_COLOR; #else color = RGB_TO_PIXEL(r, g, b); #endif memset_l(pp, color, zb->xsize); #else #error BADJUJU #endif pp = (PIXEL*)((GLbyte*)pp + zb->linesize); } } }
GB_binop__pow_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_uint16)
// C=scalar+B                       GB (_bind1st__pow_uint16)
// C=scalar+B'                      GB (_bind1st_tran__pow_uint16)
// C=A+scalar                       GB (_bind2nd__pow_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__pow_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern?  0
// B type:   uint16_t
// B pattern?  0

// BinaryOp: cij = GB_pow_uint16 (aij, bij)

// The macros below parameterize the generic templates included by the
// functions further down for this specific operator/type combination.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_uint16 (x, y) ;

// true if the binop must be flipped
// (pow is not commutative and has no built-in flipped variant)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// disabled for pow: this kernel exists only for the listed accumulators
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for pow: colscale is generated only for semiring multiply ops
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for pow: rowscale is generated only for semiring multiply ops
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Entries with Bb[p]==0 (bitmap case) are skipped; their Cx slots are left
// untouched.

GrB_Info GB (_bind1st__pow_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_uint16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_uint16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_uint16 (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__pow_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (redefined to the same uint16_t here, then restored below)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_uint16 (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__pow_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ContainerOps.h
/***************************************************************************
 *   Copyright (C) 2007 by F. P. Beekhof                                   *
 *   fpbeekhof@gmail.com                                                   *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with program; if not, write to the                              *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/

// Element-wise and linear-algebra style operators for cvmlcpp containers:
// vector/matrix arithmetic via std::transform / omptl::transform, plus
// matrix-vector and matrix-matrix multiplication.
//
// NOTE(review): this header relies on pre-C++11 facilities (std::tr1,
// std::bind1st/std::bind2nd). std::bind1st/bind2nd were deprecated in C++11
// and removed in C++17 -- confirm the project is built in a pre-C++17 mode.

#ifndef CVMLCPP_CONTAINER_OPS
#define CVMLCPP_CONTAINER_OPS 1

#include <cmath>

#ifdef __APPLE__
#include <type_traits>
#else
#include <tr1/type_traits>
#endif

#include "../base/Enums"
#include "../base/Meta"
#include "../base/use_omp.h"

#include "../../omptl/omptl_algorithm"

namespace cvmlcpp
{

namespace detail
{

/*
 * Vector Ops "+=" "-=" etc.
 */

// Vector-Scalar Ops: lhs[i] = Op(lhs[i], rhs) for a scalar rhs
template <class Container, class Op, bool is_arithmetic>
struct _doOpIs
{
	template <class RHS>
	static const Container &execute(Container &lhs, const RHS &rhs)
	{
		std::transform(lhs.begin(), lhs.end(), lhs.begin(),
				std::bind2nd(Op(), rhs));
		return lhs;
	}
};

// Vector-Vector ops: lhs[i] = Op(lhs[i], rhs[i])
// NOTE(review): rhs.size() >= lhs.size() is assumed but not checked here.
template <class Container, class Op>
struct _doOpIs<Container, Op, false>
{
	template <class RHS>
	static const Container &execute(Container &lhs, const RHS &rhs)
	{
		std::transform(lhs.begin(), lhs.end(), rhs.begin(),
				lhs.begin(), Op());
		return lhs;
	}
};

// Dispatcher: picks scalar vs element-wise form based on whether RHS is an
// arithmetic type.
template <class Container, class Op>
struct doOpIs
{
	template <class RHS>
	static const Container &execute(Container &lhs, const RHS &rhs)
	{
		return _doOpIs<Container, Op,
			std::tr1::is_arithmetic<RHS>::value>::
				execute(lhs, rhs);
	}
};

// Non-mutating variants: return a new container instead of updating lhs.
template <class Container, class Op, bool is_arithmetic>
struct _doOp
{
	template <class RHS>
	static const Container execute(const Container &lhs, const RHS &rhs)
	{
		Container result = lhs;
		std::transform(result.begin(), result.end(), result.begin(),
				std::bind2nd(Op(), rhs));
		return result;
	}

	// scalar op container: scalar is bound as the first operand
	template <class RHS>
	static const Container execute(const RHS &lhs, const Container &rhs)
	{
		Container result = rhs;
		std::transform(result.begin(), result.end(), result.begin(),
				std::bind1st(Op(), lhs));
		return result;
	}
};

// Element-wise (container op container) specialization.
template <class Container, class Op>
struct _doOp<Container, Op, false>
{
	template <class RHS>
	static const Container execute(const Container &lhs, const RHS &rhs)
	{
		assert(lhs.size() == rhs.size());
		Container result = lhs;
		std::transform(result.begin(), result.end(), rhs.begin(),
				result.begin(), Op());
		return result;
	}

	template <class LHS>
	static const Container execute(const LHS &lhs, const Container &rhs)
	{
		Container result = rhs;
		std::transform(lhs.begin(), lhs.end(), result.begin(),
				result.begin(), Op());
		return result;
	}

	static const Container execute(const Container &lhs,
					const Container &rhs)
	{
		Container result = rhs;
		std::transform(lhs.begin(), lhs.end(), result.begin(),
				result.begin(), Op());
		return result;
	}
};

/*
 * Vector Ops "+" "-" etc.
 */
template <class Container, class Op>
struct doOp
{
	template <class RHS>
	static const Container execute(const Container &lhs, const RHS &rhs)
	{
		return _doOp<Container, Op,
			std::tr1::is_arithmetic<RHS>::value>::
				execute(lhs, rhs);
	}

	template <class LHS>
	static const Container execute(const LHS &lhs, const Container &rhs)
	{
		return _doOp<Container, Op,
			std::tr1::is_arithmetic<LHS>::value>::
				execute(lhs, rhs);
	}

	static const Container execute(const Container &lhs,
					const Container &rhs)
	{
		return _doOp<Container, Op, false>::execute(lhs, rhs);
	}
};

/*
 * Matrix-Vector Ops
 */
// r = A * v, where A is a 2D array and r has been sized to the number of
// rows in advance (r.size() drives the loop).
template <template <typename Tm, std::size_t Dm, typename A> class Array_t,
	  typename Ta, typename Aux, class Vector_t, class RVector_t>
void mat_vec_mult(const Array_t<Ta, 2, Aux> &A, const Vector_t &v,
		  RVector_t &r)
{
	typedef cvmlcpp::array_traits<Array_t, Ta, 2, Aux> ATraits;

	const std::size_t N = r.size();

	// Dimensions must match
	assert(N == ATraits::shape(A)[0]);

#ifdef _OPENMP
	#pragma omp parallel for
#endif
	// int loop index: OpenMP 2.x requires a signed loop variable
	for (int i = 0; i < int(N); ++i)
	{
		assert(std::size_t(std::distance(A[i].begin(),A[i].end()))
			== v.size());
		r[i] = std::inner_product(A[i].begin(), A[i].end(),
					  v.begin(), 0.0);
	}
}

/*
 * Matrix-Matrix ops
 */
// res = lhs * rhs; res is resized here to (rows(lhs), cols(rhs)).
template <template<typename T, std::size_t N, typename Aux> class ArrayLhs,
	  template<typename T, std::size_t N, typename Aux> class ArrayRhs,
	  template<typename T, std::size_t N, typename Aux> class ArrayRes,
	  typename Tlhs, typename Trhs, typename Tres,
	  typename AuxLhs, typename AuxRhs, typename AuxRes>
void mat_mat_mult(const ArrayLhs<Tlhs, 2, AuxLhs> &lhs,
		  const ArrayRhs<Trhs, 2, AuxRhs> &rhs,
		  ArrayRes<Tres, 2, AuxRes> &res)
{
	typedef cvmlcpp::array_traits<ArrayLhs, Tlhs, 2, AuxLhs> LhsTraits;
	typedef cvmlcpp::array_traits<ArrayRhs, Trhs, 2, AuxRhs> RhsTraits;
	typedef cvmlcpp::array_traits<ArrayRes, Tres, 2, AuxRes> ResTraits;

	// Matrix dimensions must match
	assert(LhsTraits::shape(lhs)[Y] ==
		RhsTraits::shape(rhs)[X]);

	const std::size_t N = LhsTraits::shape(lhs)[Y];
	const std::size_t dims [] = { LhsTraits::shape(lhs)[X],
				      RhsTraits::shape(rhs)[Y] };
	ResTraits::resize(res, dims);
	const std::size_t Nx = dims[X];
	const std::size_t Ny = dims[Y];

#ifdef _OPENMP
	#pragma omp parallel for
#endif
	for (int i = 0; i < int(Nx); ++i)
	for (std::size_t j = 0; j < Ny; ++j)
	{
		// naive triple loop; inner dimension N accumulates into sum
		Tres sum = 0.0;
		for (std::size_t k = 0; k < N; ++k)
			sum += lhs[i][k] * rhs[k][j];
		res[i][j] = sum;
	}
}

// Element-wise binary op on two equally-shaped N-D arrays.
template <class Operator>
struct _MatMatOp
{
	template <template<typename T, std::size_t N, typename Aux>
			class Array_t,
		  typename Tm, typename AuxLhs, typename AuxRhs,
		  std::size_t N>
	static Array_t<Tm, N, AuxLhs> matMatOp(
			const Array_t<Tm, N, AuxLhs> &lhs,
			const Array_t<Tm, N, AuxRhs> &rhs)
	{
		typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxLhs> ATLHS;
		typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxRhs> ATRHS;

		assert(std::equal(ATLHS::shape(lhs), ATLHS::shape(lhs)+N,
				  ATRHS::shape(rhs)));

		Array_t<Tm, N, AuxLhs> result =
			ATLHS::create(ATLHS::shape(lhs));

		omptl::transform(ATLHS::begin(lhs), ATLHS::end(lhs),
				 ATRHS::begin(rhs), ATLHS::begin(result),
				 Operator());

		return result;
	}
};

/*
 * Matrix-Scalar ops.
 */
template <class Operator>
struct _MatOp
{
	// result[i] = Operator(m[i], v)
	template <template<typename T, std::size_t N, typename Aux>
			class Array_t,
		  typename Tm, typename Tv, typename A, std::size_t N>
	static Array_t<Tm, N, A> doOp(const Array_t<Tm, N, A> &m, const Tv v)
	{
		typedef cvmlcpp::array_traits<Array_t, Tm, N, A> AT;

		Array_t<Tm, N, A> result = AT::create(AT::shape(m));
		omptl::transform(m.begin(), m.end(), result.begin(),
				 std::bind2nd(Operator(), v));

		return result;
	}

	// result[i] = Operator(v, m[i])
	template <template<typename T, std::size_t N, typename Aux>
			class Array_t,
		  typename Tm, typename Tv, typename A, std::size_t N>
	static Array_t<Tm, N, A> doOp(const Tv v, const Array_t<Tm, N, A> &m)
	{
		typedef cvmlcpp::array_traits<Array_t, Tm, N, A> AT;

		Array_t<Tm, N, A> result = AT::create(AT::shape(m));
		omptl::transform(m.begin(), m.end(), result.begin(),
				 std::bind1st(Operator(), v));

		return result;
	}
};

} // End Namespace detail

} // End Namespace cvmlcpp

/*
 * Matrix-Matrix Ops
 */
template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename AuxLhs, typename AuxRhs>
Array_t<Tm, 2, AuxLhs> operator*(const Array_t<Tm, 2, AuxLhs> &lhs,
				 const Array_t<Tm, 2, AuxRhs> &rhs)
{
	Array_t<Tm, 2, AuxLhs> result;
	cvmlcpp::detail::mat_mat_mult(lhs, rhs, result);
	return result;
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename AuxLhs, typename AuxRhs, std::size_t N>
Array_t<Tm, N, AuxLhs> operator+(const Array_t<Tm, N, AuxLhs> &lhs,
				 const Array_t<Tm, N, AuxRhs> &rhs)
{
	return cvmlcpp::detail::_MatMatOp<std::plus<Tm> >::matMatOp(lhs, rhs);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename AuxLhs, typename AuxRhs, std::size_t N>
Array_t<Tm, N, AuxLhs> operator-(const Array_t<Tm, N, AuxLhs> &lhs,
				 const Array_t<Tm, N, AuxRhs> &rhs)
{
	return cvmlcpp::detail::_MatMatOp<std::minus<Tm> >::matMatOp(lhs, rhs);
}

/*
 * Matrix-Vector Ops
 */
template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
cvmlcpp::DynamicVector<Tv> operator*(const Array_t<Tm, 2, A> &m,
				     const cvmlcpp::StaticVector<Tv, N> &v)
{
	using namespace cvmlcpp;
	typedef array_traits<Array_t, Tm, 2, A>ArrayTraits;

	// Users: the dimensionality of the matrix must equal the vector length
	assert(ArrayTraits::shape(m)[1] == N);

	DynamicVector<Tv> result( (ArrayTraits::shape(m)[0]) );
	detail::mat_vec_mult(m, v, result);

	return result;
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A>
cvmlcpp::DynamicVector<Tv> operator*(const Array_t<Tm, 2, A> &m,
				     const cvmlcpp::DynamicVector<Tv> &v)
{
	using namespace cvmlcpp;
	typedef cvmlcpp::array_traits<Array_t, Tm, 2, A>ArrayTraits;

	// Users: the dimensionality of the matrix must equal the vector length
	assert(ArrayTraits::shape(m)[1] == v.size());

	DynamicVector<Tv> result( (ArrayTraits::shape(m)[0]) );
	detail::mat_vec_mult(m, v, result);

	return result;
}

/*
 * Matrix-Scalar Ops
 */
template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator+(const Array_t<Tm, N, A> &m, const Tv v)
{
	return cvmlcpp::detail::_MatOp<std::plus<Tm> >::doOp(m, v);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator-(const Array_t<Tm, N, A> &m, const Tv v)
{
	return cvmlcpp::detail::_MatOp<std::minus<Tm> >::doOp(m, v);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator*(const Array_t<Tm, N, A> &m, const Tv v)
{
	return cvmlcpp::detail::_MatOp<std::multiplies<Tm> >::doOp(m, v);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator/(const Array_t<Tm, N, A> &m, const Tv v)
{
	return cvmlcpp::detail::_MatOp<std::divides<Tm> >::doOp(m, v);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator+(const Tv v, const Array_t<Tm, N, A> &m)
{
	return cvmlcpp::detail::_MatOp<std::plus<Tm> >::doOp(v, m);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator-(const Tv v, const Array_t<Tm, N, A> &m)
{
	return cvmlcpp::detail::_MatOp<std::minus<Tm> >::doOp(v, m);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator*(const Tv v, const Array_t<Tm, N, A> &m)
{
	return cvmlcpp::detail::_MatOp<std::multiplies<Tm> >::doOp(v, m);
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> operator/(const Tv v, const Array_t<Tm, N, A> &m)
{
	return cvmlcpp::detail::_MatOp<std::divides<Tm> >::doOp(v, m);
}

// Compound-assignment forms delegate to the binary operators above; each
// creates a temporary rather than updating in place.
template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> &operator+=(Array_t<Tm, N, A> &m, const Tv v)
{
	m = m + v;
	return m;
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> &operator-=(Array_t<Tm, N, A> &m, const Tv v)
{
	m = m - v;
	return m;
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> &operator*=(Array_t<Tm, N, A> &m, const Tv v)
{
	m = m * v;
	return m;
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename Tv, typename A, std::size_t N>
Array_t<Tm, N, A> &operator/=(Array_t<Tm, N, A> &m, const Tv v)
{
	m = m / v;
	return m;
}

/*
 * Matrix Ops "+=" "-=" etc.
 */
// NOTE(review): "equal(...)" below is unqualified (not std::equal) and
// resolves via argument-dependent lookup -- confirm intended.
template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename AuxLhs, typename AuxRhs, std::size_t N>
void operator+=(Array_t<Tm, N, AuxLhs> &lhs,
		const Array_t<Tm, N, AuxRhs> &rhs)
{
	typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxLhs> LHSTraits;
	typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxRhs> RHSTraits;
	assert(equal(LHSTraits::shape(lhs), LHSTraits::shape(lhs)+N,
		     RHSTraits::shape(rhs)));
	omptl::transform(lhs.begin(), lhs.end(), rhs.begin(), lhs.begin(),
			 std::plus<Tm>());
}

template <template<typename T, std::size_t N, typename Aux> class Array_t,
	  typename Tm, typename AuxLhs, typename AuxRhs, std::size_t N>
void operator-=(Array_t<Tm, N, AuxLhs> &lhs,
		const Array_t<Tm, N, AuxRhs> &rhs)
{
	typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxLhs> LHSTraits;
	typedef cvmlcpp::array_traits<Array_t, Tm, N, AuxRhs> RHSTraits;
	assert(equal(LHSTraits::shape(lhs), LHSTraits::shape(lhs)+N,
		     RHSTraits::shape(rhs)));
	omptl::transform(lhs.begin(), lhs.end(), rhs.begin(), lhs.begin(),
			 std::minus<Tm>());
}

/*
 * Operations on containers
 */

namespace cvmlcpp
{

// Generates detail::unary_fun_F wrapping std::F for element type T.
#define CVMLCPP_UNARY_FUNCTOR(F) \
namespace detail { template <typename T> struct unary_fun_##F { T operator()(const T x) const { return std::F(x); } }; }

// Generates free function F() applied element-wise to fixed-size and
// dynamically-sized vector types.
#define CVMLCPP_STATIC_VECTOR_UNARY_FUNC(F) \
template <template <typename TT, std::size_t DD> class Vect, typename T, std::size_t D>\
Vect<T, D> F(const Vect<T, D> &x) \
{ \
	Vect<T, D> y; \
	omptl::transform(x.begin(), x.end(), y.begin(), /*cvmlcpp::*/detail::unary_fun_##F<T>() ); \
	return y; \
} \
template <template <typename TT> class Vect, typename T>\
Vect<T> F(const Vect<T> &x) \
{ \
	Vect<T> y; \
	omptl::transform(x.begin(), x.end(), y.begin(), /*cvmlcpp::*/detail::unary_fun_##F<T>() ); \
	return y; \
} \

#define CVMLCPP_CONTAINER_OPS_UNARY_FUNC(F) CVMLCPP_UNARY_FUNCTOR(F) CVMLCPP_STATIC_VECTOR_UNARY_FUNC(F)

CVMLCPP_CONTAINER_OPS_UNARY_FUNC(abs)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(sin)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(cos)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(tan)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(asin)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(acos)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(atan)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(norm)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(ceil)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(cosh)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(sinh)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(tanh)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(floor)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(log)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(log10)
CVMLCPP_CONTAINER_OPS_UNARY_FUNC(sqrt)
//CVMLCPP_CONTAINER_OPS_UNARY_FUNC()

// Same pattern for two-argument <cmath> functions.
#define CVMLCPP_BINARY_FUNCTOR(F) \
/*namespace cvmlcpp { */namespace detail { template <typename T> struct binary_fun_##F { T operator()(const T x, const T y) const { return std::F(x, y); } }; }

#define CVMLCPP_STATIC_VECTOR_BINARY_FUNC(F) \
template <template <typename TT, std::size_t DD> class Vect, typename T, std::size_t D>\
Vect<T, D> F(const Vect<T, D> &x, const Vect<T, D> &y) \
{ \
	Vect<T, D> z; \
	omptl::transform(x.begin(), x.end(), y.begin(), z.begin(), /*cvmlcpp::*/detail::binary_fun_##F<T>() ); \
	return z; \
} \
template <template <typename TT> class Vect, typename T>\
Vect<T> F(const Vect<T> &x, const Vect<T> &y) \
{ \
	Vect<T> z; \
	omptl::transform(x.begin(), x.end(), y.begin(), z.begin(), /*cvmlcpp::*/detail::binary_fun_##F<T>() ); \
	return z; \
}

#define CVMLCPP_CONTAINER_OPS_BINARY_FUNC(F) CVMLCPP_BINARY_FUNCTOR(F) CVMLCPP_STATIC_VECTOR_BINARY_FUNC(F)

CVMLCPP_CONTAINER_OPS_BINARY_FUNC(min)
CVMLCPP_CONTAINER_OPS_BINARY_FUNC(max)
CVMLCPP_CONTAINER_OPS_BINARY_FUNC(atan2)
CVMLCPP_CONTAINER_OPS_BINARY_FUNC(exp)
//CVMLCPP_CONTAINER_OPS_BINARY_FUNC(mod)
CVMLCPP_CONTAINER_OPS_BINARY_FUNC(pow)
//CVMLCPP_CONTAINER_OPS_BINARY_FUNC()

// F(vector, scalar): binds the scalar as the second argument.
#define CVMLCPP_CONTAINER_OPS_ARG_FUNC(F) \
template <template <typename TT, std::size_t DD> class Vect, typename T, std::size_t D, typename U>\
Vect<T, D> F(const Vect<T, D> &x, const U &arg) \
{ \
	Vect<T, D> y; \
	omptl::transform(x.begin(), x.end(), y.begin(), \
		std::bind2nd(/*cvmlcpp::*/detail::binary_fun_##F<T>(), arg)); \
	return y; \
} \
template <template <typename TT> class Vect, typename T, typename U>\
Vect<T> F(const Vect<T> &x, const U &arg) \
{ \
	Vect<T> y; \
	omptl::transform(x.begin(), x.end(), y.begin(), \
		std::bind2nd(/*cvmlcpp::*/detail::binary_fun_##F<T>(), arg)); \
	return y; \
}

CVMLCPP_CONTAINER_OPS_ARG_FUNC(exp)
//CVMLCPP_CONTAINER_OPS_ARG_FUNC(mod)
CVMLCPP_CONTAINER_OPS_ARG_FUNC(pow)
//CVMLCPP_CONTAINER_OPS_ARG_FUNC()

} // end namespace

#endif
convolution_pack1to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive fp16 convolution: input read as scalar __fp16 elements (pack 1),
// output written as 8-wide float16x8_t lanes (pack 8), using NEON fp16 FMA.
// top_blob is assumed to be pre-sized by the caller (outw/outh/outch are
// read from it, and padding is presumed already applied to bottom_blob --
// no bounds checks are performed here).
static void convolution_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] is the flat offset of tap k inside the input row plane,
    // accounting for dilation; "gap" jumps from the end of one kernel row
    // to the start of the next.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start from the 8-lane bias for this output channel group
                float16x8_t _sum = vdupq_n_f16((__fp16)0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f16(bias_data_ptr + p * 8);
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast one scalar input element across 8 lanes
                        // and FMA against 8 packed output-channel weights
                        float16x8_t _val = vdupq_n_f16(sptr[space_ofs[k]]);
                        float16x8_t _w = vld1q_f16(kptr);
                        _sum = vfmaq_f16(_sum, _val, _w);

                        kptr += 8;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1q_f16(outptr + j * 8, _sum);
            }

            outptr += outw * 8;
        }
    }
}
sample-omp.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h> /* system header: use angle brackets, not "omp.h" */

/*
 * Minimal OpenMP demo: every thread in the parallel region prints one line.
 *
 * Fixes relative to the original:
 *  - #include "omp.h" searched the local directory first for a system header;
 *    use <omp.h>.
 *  - int main() declared with an unspecified parameter list; use main(void).
 *  - explicit return 0 added.
 */
int main(void)
{
#pragma omp parallel
    {
        printf("hello world \n");
    }
    return 0;
}
5728.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (i, j, k) num_threads(#P11) { /* E := A*B */ #pragma omp target teams distribute thread_limit(256) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp target teams distribute thread_limit(256) for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp target teams distribute thread_limit(256) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
csr_elementwise.h
/*
 *  Copyright 2008-2009 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <cusp/array1d.h>
#ifdef _OPENMP
#include <thrust/scan.h>
#include <thrust/system/omp/execution_policy.h>
#endif //_OPENMP

namespace cusp
{
namespace detail
{
namespace host
{
namespace detail
{

// C = op(A, B) element-wise on two CSR matrices with the same shape.
// Tolerates duplicate and/or unsorted column indices in A and B.
//
// Two compile-time variants:
//  - serial (#ifndef _OPENMP): single pass, writes into a temp CSR sized
//    for the worst case and drops explicit zeros (result != 0).
//  - OpenMP (#ifdef _OPENMP): first pass counts nonzeros per row so each
//    thread knows its output offset; explicit zeros are kept.
template <typename Matrix1, typename Matrix2, typename Matrix3, typename BinaryFunction>
void csr_transform_elementwise(const Matrix1& A,
                               const Matrix2& B,
                               Matrix3& C,
                               BinaryFunction op)
{
    //Method that works for duplicate and/or unsorted indices
    typedef typename Matrix3::index_type IndexType;
    typedef typename Matrix3::value_type ValueType;

#ifndef _OPENMP
    cusp::csr_matrix<IndexType,ValueType,cusp::host_memory> temp(A.num_rows, A.num_cols, A.num_entries + B.num_entries);
    temp.row_offsets[0] = 0;
    size_t nnz = 0;
#endif //_OPENMP

    //MW: compute number of nonzeros in each row of C
#ifdef _OPENMP
    cusp::array1d<IndexType, cusp::host_memory> C_row_offsets( A.num_rows + 1);
    C_row_offsets[0] = 0;
#pragma omp parallel for
    for(size_t i = 0; i < A.num_rows; i++)
    {
        // every column of B in row i contributes; a column of A contributes
        // only if B does not already have it (union of the column sets)
        size_t num_nonzeros_in_row_i = B.row_offsets[i+1]-B.row_offsets[i];
        for(IndexType jj = A.row_offsets[i]; jj < A.row_offsets[i+1]; jj++)
        {
            IndexType j = A.column_indices[jj];
            bool different = true;
            for(IndexType kk = B.row_offsets[i]; kk < B.row_offsets[i+1]; kk++)
            {
                IndexType k = B.column_indices[kk];
                if( j == k)
                {
                    different = false;
                    break;
                }
            }
            if( different) num_nonzeros_in_row_i++;
        }
        C_row_offsets[i+1] = num_nonzeros_in_row_i;
    } //omp for

    //MW: now transform to offsets and resize column and values
    thrust::inclusive_scan(thrust::omp::par, C_row_offsets.begin(), C_row_offsets.end(), C_row_offsets.begin());
    size_t num_entries_in_C = C_row_offsets[A.num_rows];
    cusp::array1d<IndexType, cusp::host_memory> C_column_indices( num_entries_in_C); //MW: cheap
    cusp::array1d<ValueType, cusp::host_memory> C_values( num_entries_in_C); //MW: cheap
#endif //_OPENMP

#pragma omp parallel
    {
        // per-thread scratch: "next" is an intrusive singly-linked list over
        // column indices recording which columns are occupied in this row
        cusp::array1d<IndexType,cusp::host_memory> next(A.num_cols, IndexType(-1));
        cusp::array1d<ValueType,cusp::host_memory> A_row(A.num_cols, ValueType(0));
        cusp::array1d<ValueType,cusp::host_memory> B_row(A.num_cols, ValueType(0));
#pragma omp for
        for(size_t i = 0; i < A.num_rows; i++)
        {
            IndexType head   = -2;
            IndexType length =  0;

            //add a row of A to A_row
            IndexType i_start = A.row_offsets[i];
            IndexType i_end   = A.row_offsets[i + 1];
            for(IndexType jj = i_start; jj < i_end; jj++)
            {
                IndexType j = A.column_indices[jj];
                A_row[j] += A.values[jj];  // "+=": duplicates are summed
                if(next[j] == -1) { next[j] = head; head = j; length++; }
            }

            //add a row of B to B_row
            i_start = B.row_offsets[i];
            i_end   = B.row_offsets[i + 1];
            for(IndexType jj = i_start; jj < i_end; jj++)
            {
                IndexType j = B.column_indices[jj];
                B_row[j] += B.values[jj];
                if(next[j] == -1) { next[j] = head; head = j; length++; }
            }

            // scan through columns where A or B has
            // contributed a non-zero entry
            // MW iterate through list without destroying it
#ifdef _OPENMP
            IndexType j = C_row_offsets[i];
#endif //_OPENMP
            for(IndexType jj = 0; jj < length; jj++)
            {
                ValueType result = op( A_row[head], B_row[head]);
#ifdef _OPENMP
                C_column_indices[j + jj] = head;
                C_values[j+jj] = result;
#else
                if(result != 0)
                {
                    temp.column_indices[nnz] = head;
                    temp.values[nnz]         = result;
                    nnz++;
                }
#endif //_OPENMP
                // advance and reset the list node behind us so the scratch
                // arrays are clean for the next row
                IndexType prev = head;  head = next[head]; next[prev]  = -1;
                A_row[prev] =  0;
                B_row[prev] =  0;
            }
#ifndef _OPENMP
            temp.row_offsets[i + 1] = nnz;
#endif //_OPENMP
        } //omp for
    } //omp parallel

#ifdef _OPENMP
    C.row_offsets.swap( C_row_offsets);
    C.column_indices.swap( C_column_indices);
    C.values.swap( C_values);
    C.resize( A.num_rows, A.num_cols, num_entries_in_C);
#endif //_OPENMP
    // TODO replace with destructive assignment?
#ifndef _OPENMP
    temp.resize(A.num_rows, A.num_cols, nnz);
    cusp::copy(temp, C); //MW: why not swap??
#endif //_OPENMP
} // csr_transform_elementwise

} // end namespace detail
} // end namespace host
} // end namespace detail
} // end namespace cusp
test39.c
#include <stdio.h>

/*
 * Small demo program.
 *
 * Fixes relative to the original:
 *  - "int y = (++x) + x;" modified and read x without an intervening
 *    sequence point (undefined behavior, C11 6.5p2); the increment is now
 *    a separate statement, so y is well-defined (4).
 *  - "#pragma omp parallel single" is not a valid combined OpenMP
 *    construct; a "single" region is now nested inside the parallel
 *    region, so "Hello" is printed exactly once.
 */
int main(void)
{
    int x = 1;
    ++x;            /* sequence the increment before the reads below */
    int y = x + x;  /* 2 + 2 == 4 */
    printf("%d", y);

#pragma omp parallel
    {
#pragma omp single
        printf("Hello");
    }
    return 0;
}
GB_unop__abs_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__abs_fp32_fp32
// op(A') function:  GB_unop_tran__abs_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = fabsf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__abs_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = fabsf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = fabsf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__abs_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose template, specialized by the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fp32_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_uint32)
// op(A') function:  GB (_unop_tran__identity_fp32_uint32)

// C type:   float
// A type:   uint32_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ;         \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (identity here casts uint32_t -> float, so this is 0 and the memcpy
// fast path below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp32_uint32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every slot of Ax holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a straight memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only slots with Ab [p] != 0 hold live entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace scratch arrays
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose template; expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o G a m m a I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoGammaImage() extracts the 'mean' from the image and adjusts the image
%  to try to set its gamma appropriately.
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image)
%      MagickBooleanType AutoGammaImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, all given channels are adjusted in the same way using the
%      mean average of those channels.
%
*/

MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  /* Convenience wrapper: auto-gamma the default channels. */
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;

  MagickStatusType
    status;

  /*
    Target is mid-gray: choose gamma so that mean^(1/gamma) == 0.5, i.e.
    gamma = log(mean)/log(0.5).
    NOTE(review): a channel mean of 0 makes log(mean*QuantumScale) -inf;
    presumably LevelImageChannel tolerates this -- confirm.
  */
  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately.
  */
  status = MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      /* Accumulate per-channel success; any failure clears the flag. */
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* Opacity is only adjusted when the image actually has a matte channel. */
  if (((channel & OpacityChannel) != 0) &&
      (image->matte == MagickTrue))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* The index channel is meaningful only for CMYK images (black channel). */
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, the min/max/mean value of all given channels is used for
%      all given channels, so every channel is stretched in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  /* Convenience wrapper: auto-level the default channels. */
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Convenience method for a min/max histogram stretch.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   B r i g h t n e s s C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: apply to the default channels. */
  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /*
    Map contrast percent to a slope: contrast 0 -> tan(pi/4) = 1 (identity),
    +100 -> vertical (maximum contrast), -100 -> flat.  Clamp negatives.
  */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  /* Intercept recenters the ramp so brightness shifts the midpoint. */
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  /* Apply as a degree-1 polynomial: v' = slope*v + intercept. */
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o l o r D e c i s i o n L i s t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag  "ColorDecisionList/Image"

  /* One ASC-CDL slope/offset/power triple for a single channel. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  /* Parsed CCC document: per-channel SOP values plus global saturation. */
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MaxTextExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelPacket
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* Identity defaults; missing XML nodes leave these untouched. */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* Each SOP node holds up to 3 whitespace/comma separated values (RGB). */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetMagickToken(p,&p,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Precompute the SOP transfer function as a per-channel lookup table:
    cdl_map[v] = (v*slope + offset)^power, scaled back to quantum range.
  */
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        /* Rec. 709-style luma weights; saturation blends toward/away luma. */
        luma=0.21267f*image->colormap[i].red+0.71516f*image->colormap[i].green+
          0.07217f*image->colormap[i].blue;
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma);
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma);
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma);
      }
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(q)+0.71516f*GetPixelGreen(q)+
        0.07217f*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image, by using it as an
%  index to lookup a replacement color value in a Color Look UP Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel it is assumed that that image is a simple
%  gray-scale image that will effect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image)
%      MagickBooleanType ClutImageChannel(Image *image,
%        const ChannelType channel,Image *clut_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  /* Convenience wrapper: apply the CLUT to the default channels. */
  return(ClutImageChannel(image,DefaultChannels,clut_image));
}

MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A colored CLUT applied to a gray image produces color: promote to sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* Integer interpolation samples cell centers; others sample endpoints. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireAuthenticCacheView(clut_image,exception);
  /*
    Sample the CLUT along its diagonal into a MaxMap+1 entry lookup table.
  */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust),
      QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Three alpha cases (see the method comment above): gray CLUT
            shapes alpha, gray image takes CLUT alpha, or direct indexing.
          */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* A CLUT with transparency can introduce alpha: enable the channel. */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to a MagickTrue to increase the
%  image contrast otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/

static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /*
    Sigmoid-like nudge around mid-brightness 0.5; sign=+1 pushes values away
    from the middle (more contrast), sign=-1 pulls them toward it.
  */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o n t r a s t S t r e t c h I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that attempts
%  to improve the contrast in an image by `stretching' the range of intensity
%  values it contains to span a desired range of values.  It differs from the
%  more sophisticated histogram equalization in that it can only apply a
%  linear scaling function to the image pixel values.  As a result the
%  `enhancement' is less harsh.
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const char *levels)
%      MagickBooleanType ContrastStretchImageChannel(Image *image,
%        const size_t channel,const double black_point,
%        const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
/*
  ContrastStretchImage(): parse a levels geometry string into black/white
  points (optionally percent-scaled) and delegate to
  ContrastStretchImageChannel() on the default channels.
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  /* Default white point is the total pixel count (i.e. no white clipping). */
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /* Percent geometry: scale both points into the quantum range. */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    /* No explicit white point given: mirror the black point. */
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}

/*
  ContrastStretchImageChannel(): linearly stretch the intensity range of the
  selected channels.  Three passes over per-channel histograms: (1) form the
  histogram, (2) locate the black/white boundary levels from black_point /
  white_point, (3) build a stretch look-up map and apply it to the colormap
  and/or pixels.
*/
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color)  ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    white;

  QuantumPixelPacket
    *stretch_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (QuantumPixelPacket *) NULL))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  exception=(&image->exception);
  if (IsGrayImage(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      /* Sync mode: bin the composite intensity into every channel slot. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        Quantum
          intensity;

        intensity=ClampToQuantum(GetPixelIntensity(image,p));
        histogram[ScaleQuantumToMap(intensity)].red++;
        histogram[ScaleQuantumToMap(intensity)].green++;
        histogram[ScaleQuantumToMap(intensity)].blue++;
        histogram[ScaleQuantumToMap(intensity)].index++;
        p++;
      }
    else
      /* Per-channel mode: bin each selected channel independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  /*
    Find the histogram boundaries by locating the black/white levels: walk
    up from 0 until the cumulative count exceeds black_point, and down from
    MaxMap until it exceeds (total-white_point).
  */
  black.red=0.0;
  white.red=MaxRange(QuantumRange);
  if ((channel & RedChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].red;
        if (intensity > black_point)
          break;
      }
      black.red=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].red;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.red=(MagickRealType) i;
    }
  black.green=0.0;
  white.green=MaxRange(QuantumRange);
  if ((channel & GreenChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].green;
        if (intensity > black_point)
          break;
      }
      black.green=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].green;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.green=(MagickRealType) i;
    }
  black.blue=0.0;
  white.blue=MaxRange(QuantumRange);
  if ((channel & BlueChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].blue;
        if (intensity > black_point)
          break;
      }
      black.blue=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].blue;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.blue=(MagickRealType) i;
    }
  black.opacity=0.0;
  white.opacity=MaxRange(QuantumRange);
  if ((channel & OpacityChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].opacity;
        if (intensity > black_point)
          break;
      }
      black.opacity=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].opacity;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.opacity=(MagickRealType) i;
    }
  black.index=0.0;
  white.index=MaxRange(QuantumRange);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].index;
        if (intensity > black_point)
          break;
      }
      black.index=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].index;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.index=(MagickRealType) i;
    }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values
    below black clamp to 0, above white clamp to QuantumRange, in between
    scale linearly.  When black == white the slot is left at its
    zero-initialized value and the channel is skipped at apply time.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      {
        if (i < (ssize_t) black.red)
          stretch_map[i].red=(Quantum) 0;
        else
          if (i > (ssize_t) white.red)
            stretch_map[i].red=QuantumRange;
          else
            if (black.red != white.red)
              stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.red)/(white.red-black.red)));
      }
    if ((channel & GreenChannel) != 0)
      {
        if (i < (ssize_t) black.green)
          stretch_map[i].green=0;
        else
          if (i > (ssize_t) white.green)
            stretch_map[i].green=QuantumRange;
          else
            if (black.green != white.green)
              stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.green)/(white.green-black.green)));
      }
    if ((channel & BlueChannel) != 0)
      {
        if (i < (ssize_t) black.blue)
          stretch_map[i].blue=0;
        else
          if (i > (ssize_t) white.blue)
            stretch_map[i].blue=QuantumRange;
          else
            if (black.blue != white.blue)
              stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.blue)/(white.blue-black.blue)));
      }
    if ((channel & OpacityChannel) != 0)
      {
        if (i < (ssize_t) black.opacity)
          stretch_map[i].opacity=0;
        else
          if (i > (ssize_t) white.opacity)
            stretch_map[i].opacity=QuantumRange;
          else
            if (black.opacity != white.opacity)
              stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.opacity)/(white.opacity-black.opacity)));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      {
        if (i < (ssize_t) black.index)
          stretch_map[i].index=0;
        else
          if (i > (ssize_t) white.index)
            stretch_map[i].index=QuantumRange;
          else
            if (black.index != white.index)
              stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.index)/(white.index-black.index)));
      }
  }
  /*
    Stretch the image.  Opacity/index stretching cannot be expressed via the
    colormap alone, so force DirectClass in those cases.
  */
  if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)))
    image->storage_class=DirectClass;
  if (image->storage_class == PseudoClass)
    {
      /*
        Stretch colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          {
            if (black.red != white.red)
              image->colormap[i].red=stretch_map[
                ScaleQuantumToMap(image->colormap[i].red)].red;
          }
        if ((channel & GreenChannel) != 0)
          {
            if (black.green != white.green)
              image->colormap[i].green=stretch_map[
                ScaleQuantumToMap(image->colormap[i].green)].green;
          }
        if ((channel & BlueChannel) != 0)
          {
            if (black.blue != white.blue)
              image->colormap[i].blue=stretch_map[
                ScaleQuantumToMap(image->colormap[i].blue)].blue;
          }
        if ((channel & OpacityChannel) != 0)
          {
            if (black.opacity != white.opacity)
              image->colormap[i].opacity=stretch_map[
                ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
          }
      }
    }
  /*
    Stretch image.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          if (black.red != white.red)
            SetPixelRed(q,stretch_map[
              ScaleQuantumToMap(GetPixelRed(q))].red);
        }
      if ((channel & GreenChannel) != 0)
        {
          if (black.green != white.green)
            SetPixelGreen(q,stretch_map[
              ScaleQuantumToMap(GetPixelGreen(q))].green);
        }
      if ((channel & BlueChannel) != 0)
        {
          if (black.blue != white.blue)
            SetPixelBlue(q,stretch_map[
              ScaleQuantumToMap(GetPixelBlue(q))].blue);
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (black.opacity != white.opacity)
            SetPixelOpacity(q,stretch_map[
              ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if (black.index != white.index)
            SetPixelIndex(indexes+x,stretch_map[
              ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImageChannel)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Enhance(weight): accumulate neighbor *r into `aggregate` with the given
  weight, but only when its color distance from the center `pixel` is below
  a threshold; advances r to the next neighbor as a side effect, so the
  call order below walks each 5-pixel row of the 5x5 window in sequence.
*/
#define Enhance(weight) \
  mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \
  distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \
  distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \
  distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag  "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The 5x5 window needs at least a 5x5 image. */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireAuthenticCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Read another scan line: 5 rows of the source, offset (-2,-2) so the
      5x5 window is centered on the target pixel.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *restrict r;

      /*
        Compute weighted average of target pixel color components.
        `pixel` is the window center; each Enhance() advances r.
      */
      aggregate=zero;
      total_weight=0.0;
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      /* Rounded weighted mean; center pixel always contributes, so
         total_weight is never zero here. */
      SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
      SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/
        total_weight);
      SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
      SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
        total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image)
%      MagickBooleanType EqualizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

/* Convenience wrapper: equalize the default channels. */
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  return(EqualizeImageChannel(image,DefaultChannels));
}

/*
  EqualizeImageChannel(): histogram-equalize the selected channels.  Form a
  per-channel histogram, integrate it into a cumulative map, then remap the
  colormap and/or pixels so the cumulative distribution becomes linear.
*/
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    intensity,
    *map,
    white;

  QuantumPixelPacket
    *equalize_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (QuantumPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (QuantumPixelPacket *) NULL)
        equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
          equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
      if ((channel & GreenChannel) != 0)
        histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
      if ((channel & BlueChannel) != 0)
        histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
      if ((channel & OpacityChannel) != 0)
        histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.  In SyncChannels
    mode only the red component is accumulated and later applied to every
    channel.
  */
  (void) ResetMagickMemory(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        intensity.red+=histogram[i].red;
        map[i]=intensity;
        continue;
      }
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  /* black/white are the CDF extremes used to normalize the map. */
  black=map[0];
  white=map[(int) MaxMap];
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        if (white.red != black.red)
          equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
            (map[i].red-black.red))/(white.red-black.red)));
        continue;
      }
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.  In SyncChannels mode the red map is applied to
        all components.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & SyncChannels) != 0)
          {
            if (white.red != black.red)
              {
                image->colormap[i].red=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].red)].red;
                image->colormap[i].green=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].green)].red;
                image->colormap[i].blue=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].blue)].red;
                image->colormap[i].opacity=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].opacity)].red;
              }
            continue;
          }
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red;
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green;
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue;
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
      }
    }
  /*
    Equalize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          if (white.red != black.red)
            {
              SetPixelRed(q,equalize_map[
                ScaleQuantumToMap(GetPixelRed(q))].red);
              SetPixelGreen(q,equalize_map[
                ScaleQuantumToMap(GetPixelGreen(q))].red);
              SetPixelBlue(q,equalize_map[
                ScaleQuantumToMap(GetPixelBlue(q))].red);
              SetPixelOpacity(q,equalize_map[
                ScaleQuantumToMap(GetPixelOpacity(q))].red);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,equalize_map[
                  ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
            }
          q++;
          continue;
        }
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        SetPixelRed(q,equalize_map[
          ScaleQuantumToMap(GetPixelRed(q))].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        SetPixelGreen(q,equalize_map[
          ScaleQuantumToMap(GetPixelGreen(q))].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        SetPixelBlue(q,equalize_map[
          ScaleQuantumToMap(GetPixelBlue(q))].blue);
      if (((channel & OpacityChannel) != 0) &&
          (white.opacity != black.opacity))
        SetPixelOpacity(q,equalize_map[
          ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        SetPixelIndex(indexes+x,equalize_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a m m a I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GammaImage() gamma-corrects a particular image channel.  The same
%  image viewed on different devices will have perceptual differences in the
%  way the image's intensities are represented on the screen.  Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const char *level)
%      MagickBooleanType GammaImageChannel(Image *image,
%        const ChannelType channel,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/

/* pow() that passes negative inputs through unchanged (a fractional
   exponent on a negative base is undefined). */
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

/*
  GammaImage(): parse a gamma level string (rho[,sigma[,xi]] for R,G,B) and
  delegate to GammaImageChannel(), once for equal gammas or per channel
  otherwise.
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    gamma.green=gamma.red;
  gamma.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    gamma.blue=gamma.red;
  /* Gamma of 1.0 everywhere is the identity: nothing to do. */
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status|=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  GammaImageChannel(): apply gamma correction to the selected channels.
  Non-HDRI builds precompute a quantum look-up table (gamma_map); HDRI
  builds evaluate gamma_pow() per sample.
*/
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /* gamma == 0.0 leaves the map all-zero, zeroing the channel. */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              /* With a matte, correct alpha (inverted opacity) instead. */
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,1.0/gamma);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,1.0/gamma);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,1.0/gamma);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,1.0/gamma);
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            1.0/gamma));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            1.0/gamma));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            1.0/gamma));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              1.0/gamma));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),1.0/gamma));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              1.0/gamma));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),1.0/gamma));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),1.0/gamma));
            }
        }
#endif
      q++;
    }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the image's overall gamma so metadata stays consistent. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImageChannel method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

/*
  MagickMax()/MagickMin(): branchless-enough helpers for the intensity
  methods below; kept local to this translation unit.
*/
static inline MagickRealType MagickMax(const MagickRealType x,
  const MagickRealType y)
{
  if (x > y)
    return(x);
  return(y);
}

static inline MagickRealType MagickMin(const MagickRealType x,
  const MagickRealType y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  GrayscaleImage(): convert every pixel to a gray value computed by the
  requested intensity method, then switch the image to the GRAY colorspace.
  PseudoClass images are first synced and promoted to DirectClass so the
  pixel cache can be written directly.
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Luminance methods work on linear RGB; luma methods on non-linear sRGB.
    Pre-convert the colorspace so the per-pixel gamma encode/decode below
    only fires when the source colorspace disagrees with the method.
  */
  switch (image->intensity)
  {
    case Rec601LuminancePixelIntensityMethod:
    case Rec709LuminancePixelIntensityMethod:
    {
      (void) SetImageColorspace(image,RGBColorspace);
      break;
    }
    case Rec601LumaPixelIntensityMethod:
    case Rec709LumaPixelIntensityMethod:
    case UndefinedPixelIntensityMethod:
    {
      (void) SetImageColorspace(image,sRGBColorspace);
      break;
    }
    default:
      break;
  }
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: max of the three channels. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of channel min and max. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean square, normalized back into the quantum range. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.21260f*red+0.71520f*green+0.07220f*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.21260f*red+0.71520f*green+0.07220f*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square of the channel values. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImageChannel)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
%      MagickBooleanType HaldClutImageChannel(Image *image,
%        const ChannelType channel,Image *hald_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
% */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
*/ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), 
&pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. 
Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImageChannel() and LevelizeImageChannel(), below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o levels: Specify the levels where the black and white points have the % range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2). % A '!' flag inverts the re-mapping. % */ MagickExport MagickBooleanType LevelImage(Image *image,const char *levels) { double black_point, gamma, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. */ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) image->columns*image->rows/100.0; white_point*=(double) image->columns*image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((flags & AspectValue ) == 0) status=LevelImageChannel(image,DefaultChannels,black_point,white_point, gamma); else status=LevelizeImage(image,black_point,white_point,gamma); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() applies the normal level operation to the image, spreading % out the values between the black and white points over the entire range of % values. 
Gamma correction is also applied after the values have been mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized, or by swapping black and white values, the image negated.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

/*
  LevelPixel(): level one channel sample: scale [black_point,white_point]
  onto the full quantum range, then apply 1/gamma correction.  The result
  is not clamped here; callers wrap it in ClampToQuantum().
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  /* Degenerate range (white == black): avoid division by zero. */
  scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0;
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/
    gamma);
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is leveled in alpha space: it is inverted
        (QuantumRange-opacity) before and after the transfer function.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      /* Opacity is leveled via the alpha accessors (inverted sense). */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma
%  is applied before the values are mapped.
%
%  LevelizeImageChannel() can be called by using a +level command line
%  API option, or by using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used, for example, to de-contrast a greyscale image to the exact
%  levels specified.  Or by using specific levels for each channel of an image
%  you can convert a gray-scale image to any linear color gradient, according
%  to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/

MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: levelize the default channels. */
  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  LevelizeValue(): inverse level transfer: gamma-correct the normalized
  sample, then compress it into [black_point,white_point].  Captures
  black_point/white_point/gamma from the enclosing scope.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is processed in alpha space (inverted
        before and after the mapping).
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() maps the given color to "black" and "white" values,
%  linearly spreading out
the colors, and level values on a channel by channel
%  basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction.  That is any existing "black" and "white" colors in
%  the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient
%  into the given color gradient.
%
%  The format of the LevelColorsImageChannel method is:
%
%      MagickBooleanType LevelColorsImage(Image *image,
%        const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%      MagickBooleanType LevelColorsImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from
%
%    o white_point: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
*/

MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: level colors on the default channels. */
  status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert);
  return(status);
}

/*
  LevelColorsImageChannel(): per-channel level (or levelize, when invert)
  using the corresponding components of black_color/white_color as the
  channel's black and white points.  Returns MagickTrue if any channel
  operation succeeded.
*/
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A gray image must leave gray if the given colors are not gray. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickFalse;
  if (invert == MagickFalse)
    {
      if ((channel & RedChannel) != 0)
        status|=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status|=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status|=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        status|=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status|=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      if ((channel & RedChannel) != 0)
        status|=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status|=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status|=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        status|=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status|=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point
    levels.  black_point/white_point here are pixel counts to discard from
    either end of the histogram.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /*
    NOTE(review): black/white are indexes in Map space (0..MaxMap); they are
    passed to LevelImageChannel() which expects quantum values -- confirm
    MaxMap == QuantumRange in this build, or a ScaleMapToQuantum() is needed.
  */
  status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white,
    1.0);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chrome, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
*/

/*
  ModulateHCL(): scale chroma and luma and rotate hue in HCL space.
  A percent of 100 leaves the component unchanged; each 200 percent of
  hue is a full rotation.
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /*
    NOTE(review): this helper wraps hue with `> 1.0` while the HSL/HSV/HWB
    helpers below use `>= 1.0` -- confirm the asymmetry is intentional.
  */
  while (hue > 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

/*
  ModulateHCLp(): as ModulateHCL() but in the HCLp variant space.
*/
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

/*
  ModulateHSB(): scale saturation and brightness and rotate hue in HSB.
*/
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

/*
  ModulateHSI(): scale saturation and intensity and rotate hue in HSI.
*/
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

/*
  ModulateHSL(): scale saturation and lightness and rotate hue in HSL.
*/
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

/*
  ModulateHSV(): scale saturation and value and rotate hue in HSV.
*/
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}

/*
  ModulateHWB(): scale whiteness and blackness and rotate hue in HWB.
*/
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Increase or decrease color blackness, whiteness, or hue.
*/ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue >= 1.0) hue-=1.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; if (IssRGBColorspace(image->colorspace) != MagickFalse) { red=ClampToQuantum(DecodePixelGamma((MagickRealType) red)); green=ClampToQuantum(DecodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) blue)); } switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { 
ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } if (IssRGBColorspace(image->colorspace) != MagickFalse) { red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); if (IssRGBColorspace(image->colorspace) != MagickFalse) { red=ClampToQuantum(DecodePixelGamma((MagickRealType) red)); green=ClampToQuantum(DecodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) blue)); } switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); 
break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } if (IssRGBColorspace(image->colorspace) != MagickFalse) { red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. 
%
%  The format of the NormalizeImage method is:
%
%      MagickBooleanType NormalizeImage(Image *image)
%      MagickBooleanType NormalizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: normalize all default channels. */
  status=NormalizeImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  /*
    NOTE(review): the description above says "darkest 2 percent / brightest
    1 percent", but these thresholds clip the darkest 0.15% of pixels to
    black and the brightest 0.05% (1-0.9995) to white -- confirm which is
    intended.  The points are pixel counts passed to the histogram stretch.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
%  Contrast indicates how much to increase the contrast (0 is none; 3 is
%  typical; 20 is pushing it); mid-point indicates where midtones fall in the
%  resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
%  sharpen to MagickTrue to increase the image contrast otherwise the contrast
%  is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const char *levels)
%      MagickBooleanType SigmoidalContrastImageChannel(Image *image,
%        const ChannelType channel,const MagickBooleanType sharpen,
%        const double contrast,const double midpoint)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
% % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). 
*/ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 
1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; register ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SigmoidalContrastImageChannel) #endif proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
solution.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>

/*
 * Integrand: sqrt(4 - x^2), the quarter circle of radius 2 on [0, 2].
 * Values outside the domain must not contribute to the integral.
 */
double Func(double x)
{
    if (x > 2) {
        return 0;
    }
    return sqrt(4 - x*x);
}

/*
 * Composite trapezoidal (Newton-Cotes) rule on a uniform grid of width h:
 * integrates Func over [left_index * h, right_index * h].
 */
double Integral(size_t left_index, size_t right_index, double h)
{
    double I = (Func(right_index * h) + Func(left_index * h)) / 2;
    for (size_t i = left_index + 1; i < right_index; i++) {
        I += Func(i * h);
    }
    return I * h;
}

int main(int argc, char **argv)
{
    /* Number of grid steps. */
    size_t N = 1000000;
    /* Requested number of threads. */
    int size = 1;
    /* Number of consecutive runs, to obtain an average execution time. */
    size_t numexp = 1;

    if (argc > 1) {
        N = atoll(argv[1]);
        if (argc > 2) {
            size = atoi(argv[2]);
            if (argc > 3) {
                numexp = atoll(argv[3]);
            }
        }
    }

    /* Integration bounds. */
    double a = 0, b = 2;
    /* Mesh width of the uniform grid. */
    double h = (b - a) / N;
    double result = 0.0;

    /* Create and initialize the lock guarding the shared accumulator. */
    omp_lock_t lock;
    omp_init_lock(&lock);

    /* BUG FIX: the loop header previously read "++)i", a syntax error. */
    for (size_t i = 0; i < numexp; ++i) {
        /* Set the requested number of threads. */
        omp_set_num_threads(size);
        /* Begin parallel section. */
        #pragma omp parallel
        {
            int rank = omp_get_thread_num();
            /* Hand each thread its own contiguous index range; the last
               thread also absorbs the remainder N % size. */
            size_t left_index = rank * (N / size);
            size_t right_index = (rank != size - 1) ? (rank + 1) * (N / size) : N;
            /* Integrate over this thread's sub-interval. */
            double integral = Integral(left_index, right_index, h);
            /* Serialize the reduction into the shared result. */
            omp_set_lock(&lock);
            result += integral;
            omp_unset_lock(&lock);
        }
    }
    /* Destroy the lock. */
    omp_destroy_lock(&lock);

    /* Print the thread count and the averaged integral (expected: pi). */
    printf("%d %lf\n", size, result / numexp);
    return EXIT_SUCCESS;
}
GB_unaryop__abs_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_fp32 // op(A') function: GB_tran__abs_bool_fp32 // C type: bool // A type: float // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_fp32 ( bool *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mem.c
/* Copyright 2013-2015. The Regents of the University of California.
 * Copyright 2016. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 *
 */

#include <stdbool.h>
#include <assert.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "misc/misc.h"
#include "misc/debug.h"

#include "mem.h"

// global switch: when true, freed device blocks are parked in a cache
// for reuse instead of being returned to the device allocator
bool memcache = true;

// disable the memory cache (subsequent frees go straight to the device allocator)
void memcache_off(void)
{
	memcache = false;
}

// bookkeeping record for one tracked allocation
struct mem_s {

	const void* ptr;	// start address of the allocation
	size_t len;		// size in bytes
	bool device;		// true if this is device (accelerator) memory
	bool free;		// true while parked in the free cache
	int device_id;		// owning device
	int thread_id;		// allocating OpenMP thread (-1 without OpenMP)
	struct mem_s* next;
};

// list of allocations currently in use (most recently touched first)
static struct mem_s* mem_list = NULL;
//sorted list of free memory->use smallest possible mem_s for new allocation
static struct mem_s* mem_list_free = NULL;

// search can stop early if not min_ptr<=ptr<=max_ptr
// NOTE(review): min_ptr/max_ptr are read and widened without the critical
// section -- presumably benign as a conservative filter, but confirm under
// concurrent allocation.
void* min_ptr = NULL;
void* max_ptr = NULL;

// does ptr point inside the allocation described by rptr?
static bool inside_p(const struct mem_s* rptr, const void* ptr)
{
	return (ptr >= rptr->ptr) && (ptr < rptr->ptr + rptr->len);
}

// find the record whose allocation contains ptr; if remove is true the
// record stays unlinked from mem_list, otherwise it is re-inserted at the
// head (most-recently-used position); returns NULL if ptr is not tracked
static struct mem_s* search(const void* ptr, bool remove)
{
	struct mem_s* rptr = NULL;

	// fast reject: ptr cannot belong to any tracked allocation
	if ((NULL == min_ptr) || (ptr < min_ptr) || (ptr > max_ptr))
		return rptr;

	#pragma omp critical
	{
		struct mem_s** nptr = &mem_list;

		while (true) {

			rptr = *nptr;

			if (NULL == rptr)
				break;

			if (inside_p(rptr, ptr)) {

				// unlink the match from the list
				*nptr = rptr->next;
				break;
			}

			nptr = &(rptr->next);
		}

		// pure lookup: push the match back onto the list head
		if ((NULL != rptr) && (!remove)) {

			rptr->next = mem_list;
			mem_list = rptr;
		}
	}

	return rptr;
}

// is the cached block rptr usable for a request of 'size' bytes on device
// 'dev' (and, unless tid is -1, allocated by thread 'tid')?
static bool free_check_p(const struct mem_s* rptr, size_t size, int dev, int tid)
{
	return (rptr->free
		&& (rptr->device_id == dev)
		&& (rptr->len >= size)
		&& (( 0 == size) || (rptr->len <= 4 * size)) // small allocations shall not occupy large memory areas (turned off if requested size is 0)
		&& ((-1 == tid) || (rptr->thread_id == tid)));
}

// return the link that points to the first suitable free block (or to the
// terminating NULL); caller must hold the critical section
static struct mem_s** find_free_unsafe(size_t size, int dev, int tid)
{
	struct mem_s* rptr = NULL;
	struct mem_s** nptr = &mem_list_free;

	while (true) {

		rptr = *nptr;

		if (NULL == rptr)
			break;

		if (free_check_p(rptr, size, dev, tid))
			break;

		nptr = &(rptr->next);
	}

	return nptr;
}

// pop a suitable cached block for reuse (from any thread); NULL on miss
static struct mem_s* find_free(size_t size, int dev)
{
	struct mem_s* rptr = NULL;

	#pragma omp critical
	{
		struct mem_s** nrptr = find_free_unsafe(size, dev, -1);

		if (NULL != *nrptr) {

			rptr = *nrptr;
			*nrptr = rptr->next;
			rptr->free = false;
		}
	}

	return rptr;
}

// record a fresh allocation in mem_list
static void insert(const void* ptr, size_t len, bool device, int dev)
{
	PTR_ALLOC(struct mem_s, nptr);

	nptr->ptr = ptr;
	nptr->len = len;
	nptr->device = device;
	nptr->device_id = dev;
#ifdef _OPENMP
	nptr->thread_id = omp_get_thread_num();
#else
	nptr->thread_id = -1;
#endif
	nptr->free = false;

	#pragma omp critical
	{
		nptr->next = mem_list;
		mem_list = PTR_PASS(nptr);
	}
}

// return every cached block of the calling thread on device 'dev' back to
// the device allocator; no-op when the cache is disabled
void memcache_clear(int dev, void (*device_free)(const void*x))
{
	struct mem_s* nptr = NULL;

	if (!memcache)
		return;

	do {
		#pragma omp critical
		{
#ifdef _OPENMP
			int tid = omp_get_thread_num();
#else
			int tid = -1;
#endif
			// size 0 matches any length (see free_check_p)
			struct mem_s** rptr = find_free_unsafe(0, dev, tid);

			nptr = *rptr;

			// remove from list
			if (NULL != nptr)
				*rptr = nptr->next;
		}

		if (NULL != nptr) {

			assert(nptr->device);
			debug_printf(DP_DEBUG3, "Freeing %ld bytes. (DID: %d TID: %d)\n\n", nptr->len, nptr->device_id, nptr->thread_id);
			device_free(nptr->ptr);
			xfree(nptr);
		}

	} while (NULL != nptr);
}

// true if ptr lies inside a tracked device allocation
bool mem_ondevice(const void* ptr)
{
	if (NULL == ptr)
		return false;

	struct mem_s* p = search(ptr, false);
	bool r = ((NULL != p) && p->device);

	return r;
}

// true if ptr lies inside any tracked allocation
bool mem_device_accessible(const void* ptr)
{
	struct mem_s* p = search(ptr, false);

	return (NULL != p);
}

// free a tracked device allocation; when caching is enabled the block is
// parked in the length-sorted free list instead of being released
void mem_device_free(void* ptr, void (*device_free)(const void* ptr))
{
	struct mem_s* nptr = search(ptr, true);

	assert(NULL != nptr);
	assert(nptr->ptr == ptr);
	assert(nptr->device);

	if (memcache) {

		assert(!nptr->free);
		nptr->free = true;

		#pragma omp critical
		{
			// keep the free list sorted by ascending length
			struct mem_s** pos_ins = &mem_list_free;

			while ((NULL != *pos_ins) && (nptr->len > (*pos_ins)->len))
				pos_ins = &((*pos_ins)->next);

			nptr->next = *pos_ins;
			*pos_ins = nptr;
		}

	} else {

		device_free(ptr);
		xfree(nptr);
	}
}

// allocate device memory, preferably by reusing a cached block; falls back
// to device_alloc and records the new allocation
void* mem_device_malloc(int device, long size, void* (*device_alloc)(size_t))
{
	if (memcache) {

		struct mem_s* nptr = find_free(size, device);

		if (NULL != nptr) {

			assert(nptr->device);
			assert(!nptr->free);
#ifdef _OPENMP
			nptr->thread_id = omp_get_thread_num();
#else
			nptr->thread_id = -1;
#endif
			// move the reused record back onto the in-use list
			#pragma omp critical
			{
				nptr->next = mem_list;
				mem_list = nptr;
			}

			return (void*)(nptr->ptr);
		}
	}

	void* ptr = device_alloc(size);

	// widen the [min_ptr, max_ptr] window used by search()
	if ((NULL == min_ptr) || (ptr < min_ptr))
		min_ptr = ptr;

	if ((NULL == max_ptr) || (ptr + size > max_ptr))
		max_ptr = ptr + size;

	insert(ptr, size, true, device);

	return ptr;
}
convolution_1x1_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack the 1x1 convolution weights for the pack4 SGEMM kernel.
//
// kernel:         flat weights, laid out outch-major over inch (1x1 taps).
// kernel_tm_pack4: destination, allocated here; on AArch64 output channels
//                  are grouped 8 at a time (with a 4-wide tail), on ARMv7
//                  4 at a time, so loads in the compute kernel are contiguous.
static void conv1x1s1_sgemm_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
#if __aarch64__
    kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
    kernel_tm_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // 8 output channels per group: each step of 4 input channels emits a
    // 32-float tile ordered [in][out8].
    for (; q + 7 < outch; q += 8)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;
        const float* k4 = (const float*)kernel + (q + 4) * inch;
        const float* k5 = (const float*)kernel + (q + 5) * inch;
        const float* k6 = (const float*)kernel + (q + 6) * inch;
        const float* k7 = (const float*)kernel + (q + 7) * inch;

        float* g0 = kernel_tm_pack4.channel(q / 8);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];
            g0[4] = k4[0];
            g0[5] = k5[0];
            g0[6] = k6[0];
            g0[7] = k7[0];

            g0[8] = k0[1];
            g0[9] = k1[1];
            g0[10] = k2[1];
            g0[11] = k3[1];
            g0[12] = k4[1];
            g0[13] = k5[1];
            g0[14] = k6[1];
            g0[15] = k7[1];

            g0[16] = k0[2];
            g0[17] = k1[2];
            g0[18] = k2[2];
            g0[19] = k3[2];
            g0[20] = k4[2];
            g0[21] = k5[2];
            g0[22] = k6[2];
            g0[23] = k7[2];

            g0[24] = k0[3];
            g0[25] = k1[3];
            g0[26] = k2[3];
            g0[27] = k3[3];
            g0[28] = k4[3];
            g0[29] = k5[3];
            g0[30] = k6[3];
            g0[31] = k7[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            g0 += 32;
        }
    }
#endif // __aarch64__
    // 4 output channels per group (the only path on ARMv7; the AArch64 tail
    // when outch % 8 >= 4): each step of 4 input channels emits a 16-float
    // tile ordered [in][out4].
    for (; q + 3 < outch; q += 4)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;

#if __aarch64__
        float* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        float* g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int p = 0; p + 3 < inch; p += 4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];

            g0[4] = k0[1];
            g0[5] = k1[1];
            g0[6] = k2[1];
            g0[7] = k3[1];

            g0[8] = k0[2];
            g0[9] = k1[2];
            g0[10] = k2[2];
            g0[11] = k3[2];

            g0[12] = k0[3];
            g0[13] = k1[3];
            g0[14] = k2[3];
            g0[15] = k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}

// 1x1 stride-1 convolution as a packed SGEMM (pack4 layout).
// NOTE(review): this function is truncated in this chunk -- it continues
// past the visible region; only the head is annotated here.
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    // scratch buffer: spatial positions regrouped into tiles of 12/8/4/2/1
    // (AArch64) or 8/4/2/1 (ARMv7) so the GEMM micro-kernels read contiguously
#if __aarch64__
    Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
#else
    Mat tmp(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
#endif
    {
        int nn_size;
        int remain_size_start;
#if __aarch64__
        nn_size = size / 12;
        remain_size_start = nn_size * 12;

#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 12;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr =
tmp.channel(i / 12); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); img0 += bottom_blob.cstep * 4; } } #else remain_size_start = 0; #endif nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); #else float* tmpptr = tmp.channel(i / 8); #endif for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" 
"vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #endif for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const float* img0 = bottom_blob.channel(0); img0 += i * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", 
"v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1"); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr01 = (const float*)kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, 
v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, 
v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr01 = (const float*)kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, 
v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, 
v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr01 = (const float*)kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 
{v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < size; i += 2) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr01 = (const float*)kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v1.16b \n" "mov v19.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla 
v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr01 = (const float*)kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%10] \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), 
"r"(biasptr) // %10 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; #if __aarch64__ for (; i + 11 < size; i += 12) { float* tmpptr = tmp.channel(i / 12); const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm 
pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < size; i += 8) { #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2); #else float* tmpptr = tmp.channel(i / 8); const float* kptr0 = (const float*)kernel.channel(p); #endif int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" 
"0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "vmov q12, q0 \n" "vmov q13, q0 \n" "vmov q14, q0 \n" "vmov q15, q0 \n" "0: \n" 
"pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < size; i += 4) { #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr0 = (const float*)kernel.channel(p); #endif int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 
r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < size; i += 2) { #if __aarch64__ float* tmpptr = 
tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const float* kptr0 = (const float*)kernel.channel(p); #endif int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < size; i++) { #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const float* kptr0 = (const float*)kernel.channel(p); #endif int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v16.4s}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "vld1.f32 {d16-d17}, [%8] \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? 
//             bias[p] : 0.f;
//
//         float* outptr0 = out0;
//
//         for (int i=0; i<size; i++)
//         {
//             float sum = bias0;
//
//             const float* kptr = _kernel.channel(p);
//
//             for (int q=0; q<inch; q++)
//             {
//                 const float* img0 = bottom_blob.channel(q);
//
//                 sum += img0[i] * kptr[0];
//                 kptr ++;
//             }
//
//             outptr0[i] = sum;
//         }
//     }
}

// 1x1 convolution with stride 2 for elempack=4 float data (NEON).
// Strategy: gather every second pixel of every second row into a contiguous
// "shrinked" blob of size outw x outh, then reuse the stride-1 sgemm kernel.
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // floats to skip after finishing one output row:
    // (w - 2*outw)*4 leftover floats of the current input row plus one whole
    // skipped input row (w*4 floats), since the vertical stride is 2
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one pack4 pixel (4 floats), then skip the next pixel
                // (horizontal stride 2 => advance 8 floats on the input)
                float32x4_t _v = vld1q_f32(r0);
                vst1q_f32(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // the shrinked blob now has stride 1, so the sgemm path applies directly
    conv1x1s1_sgemm_pack4_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
GB_unaryop__lnot_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_bool_uint32
// op(A') function: GB_tran__lnot_bool_uint32

// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = !((bool) Ax [p]) for all p in [0, anz).  Cx and Ax are declared
// 'restrict', so the caller guarantees the two arrays do not alias.
GrB_Info GB_unop__lnot_bool_uint32
(
    bool *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // every entry is independent, so a static schedule parallelizes cleanly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template file, which is
    // specialized here via the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MzXMLHandler.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2013. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Andreas Bertsch $ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #ifndef OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H #define OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H #include <OpenMS/CONCEPT/ProgressLogger.h> #include <OpenMS/FORMAT/Base64.h> #include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h> #include <OpenMS/FORMAT/HANDLERS/XMLHandler.h> #include <OpenMS/DATASTRUCTURES/String.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/INTERFACES/IMSDataConsumer.h> #include <stack> namespace OpenMS { class MetaInfoInterface; namespace Internal { /** @brief XML handlers for MzXMLFile MapType has to be a MSExperiment or have the same interface. Do not use this class. It is only needed in MzXMLFile. */ template <typename MapType> class MzXMLHandler : public XMLHandler { public: /**@name Constructors and destructor */ //@{ /// Constructor for a read-only handler MzXMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) : XMLHandler(filename, version), exp_(&exp), cexp_(0), decoder_(), nesting_level_(0), skip_spectrum_(false), spec_write_counter_(1), consumer_(NULL), scan_count_(0), logger_(logger) { init_(); } /// Constructor for a write-only handler MzXMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) : XMLHandler(filename, version), exp_(0), cexp_(&exp), decoder_(), nesting_level_(0), skip_spectrum_(false), spec_write_counter_(1), consumer_(NULL), scan_count_(0), logger_(logger) { init_(); } /// Destructor virtual ~MzXMLHandler() {} //@} // Docu in base class virtual void endElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname); // Docu in base class virtual void startElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname, const xercesc::Attributes& attributes); 
// Docu in base class virtual void characters(const XMLCh* const chars, const XMLSize_t length); /// Write the contents to a stream void writeTo(std::ostream& os); /// Sets the options void setOptions(const PeakFileOptions& options) { options_ = options; } ///Gets the scan count UInt getScanCount() { return scan_count_; } /// Set the IMSDataConsumer consumer which will consume the read data void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType> * consumer) { consumer_ = consumer; } private: /// initialize members (call from C'tor) void init_() { cv_terms_.resize(6); //Polarity String("any;+;-").split(';', cv_terms_[0]); //Scan type // is no longer used cv_terms_[1] is empty now //Ionization method String(";ESI;EI;CI;FAB;;;;;;;;;;;;;APCI;;;;;;;;MALDI").split(';', cv_terms_[2]); cv_terms_[2].resize(IonSource::SIZE_OF_IONIZATIONMETHOD); //Mass analyzer String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';', cv_terms_[3]); cv_terms_[3].resize(MassAnalyzer::SIZE_OF_ANALYZERTYPE); //Detector String(";EMT;;;Faraday Cup;;;;;Channeltron;Daly;Microchannel plate").split(';', cv_terms_[4]); cv_terms_[4].resize(IonDetector::SIZE_OF_TYPE); //Resolution method String(";FWHM;TenPercentValley;Baseline").split(';', cv_terms_[5]); cv_terms_[5].resize(MassAnalyzer::SIZE_OF_RESOLUTIONMETHOD); /* // OLD: cv_terms_.resize(6); //Polarity String("any;+;-").split(';',cv_terms_[0]); //Scan type // is no longer used cv_terms_[1] is empty now //Ionization method String(";ESI;EI;CI;FAB;TSP;MALDI;FD;FI;PD;SI;TI;API;ISI;CID;CAD;HN;APCI;APPI;ICP").split(';',cv_terms_[2]); //Mass analyzer String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';',cv_terms_[3]); //Detector String(";EMT;Daly;;Faraday Cup;;;;Channeltron").split(';',cv_terms_[4]); //Resolution method String(";FWHM;TenPercentValley;Baseline").split(';',cv_terms_[5]); */ } protected: /// Peak type typedef typename MapType::PeakType PeakType; /// Spectrum type typedef 
MSSpectrum<PeakType> SpectrumType; /// map pointer for reading MapType* exp_; /// map pointer for writing const MapType* cexp_; /// Options for loading and storing PeakFileOptions options_; /**@name temporary data structures to hold parsed data */ //@{ Base64 decoder_; Int nesting_level_; /** @brief Data necessary to generate a single spectrum Small struct holds all data necessary to populate a spectrum at a later timepoint (since reading of the base64 data and generation of spectra can be done at distinct timepoints). */ struct SpectrumData { UInt peak_count_; String precision_; String compressionType_; String char_rest_; SpectrumType spectrum; bool skip_data; }; /// Vector of spectrum data stored for later parallel processing std::vector< SpectrumData > spectrum_data_; //@} /// Flag that indicates whether this spectrum should be skipped (due to options) bool skip_spectrum_; /// spectrum counter (spectra without peaks are not written) UInt spec_write_counter_; /// Consumer class to work on spectra Interfaces::IMSDataConsumer<MapType>* consumer_; /// Consumer class to work on spectra UInt scan_count_; /// Progress logging class const ProgressLogger& logger_; /// write metaInfo to xml (usually in nameValue-tag) inline void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, int indent = 4, String tag = "nameValue") { std::vector<String> keys; // Vector to hold keys to meta info meta.getKeys(keys); for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it) { if ((*it)[0] != '#') // internally used meta info start with '#' { os << String(indent, '\t') << "<" << tag << " name=\"" << *it << "\" value=\"" << meta.getMetaValue(*it) << "\"/>\n"; } } } /// data processing auxiliary variable std::vector<DataProcessing> data_processing_; /** @brief Fill a single spectrum with data from input @note Do not modify any internal state variables of the class since this function will be executed in parallel. 
      */
      void doPopulateSpectraWithData_(SpectrumData & spectrum_data)
      {
        typedef typename SpectrumType::PeakType PeakType;

        // Local decoder instance that shadows the decoder_ member on purpose:
        // each concurrent invocation gets its own decoder state, since this
        // function is executed in parallel (see populateSpectraWithData_).
        Base64 decoder_;

        //std::cout << "reading scan" << "\n";
        if (spectrum_data.char_rest_ == "") // no peaks
        {
          return;
        }

        //remove whitespaces from binary data
        //this should not be necessary, but linebreaks inside the base64 data are unfortunately no exception
        spectrum_data.char_rest_.removeWhitespaces();

        if (spectrum_data.precision_ == "64")
        {
          std::vector<DoubleReal> data;
          if (spectrum_data.compressionType_ == "zlib")
          {
            decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
          }
          else
          {
            decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
          }
          // release the raw base64 text as soon as it is decoded
          spectrum_data.char_rest_ = "";
          PeakType peak;
          //push_back the peaks into the container
          // NOTE(review): the loop trusts peak_count_ (taken from the file's
          // 'peaksCount' attribute); a malformed file where the decoded array
          // is shorter than 2*peak_count_ would make data[n+1] read out of
          // bounds -- TODO confirm this is validated upstream.
          for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
          {
            // check if peak is in the specified m/z and intensity range
            if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
               && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
            {
              peak.setMZ(data[n]);
              peak.setIntensity(data[n + 1]);
              spectrum_data.spectrum.push_back(peak);
            }
          }
        }
        else //precision 32
        {
          // identical to the 64-bit branch above, but decoding into floats
          std::vector<Real> data;
          if (spectrum_data.compressionType_ == "zlib")
          {
            decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
          }
          else
          {
            decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
          }
          spectrum_data.char_rest_ = "";
          PeakType peak;
          //push_back the peaks into the container
          for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
          {
            if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
               && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
            {
              peak.setMZ(data[n]);
              peak.setIntensity(data[n + 1]);
              spectrum_data.spectrum.push_back(peak);
            }
          }
        }
      }

      /**
          @brief Populate all spectra on the stack with data from input

          Will
          populate all spectra on the current work stack with data (using
          multiple threads if available) and append them to the result.
      */
      void populateSpectraWithData_()
      {
        // Whether spectrum should be populated with data
        if (options_.getFillData())
        {
          size_t errCount = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++)
          {
            // parallel exception catching and re-throwing business:
            // exceptions may not escape an OpenMP worker, so they are counted
            // here and re-thrown once after the loop.
            // NOTE(review): errCount is incremented under an omp critical but
            // read here without synchronization; this best-effort early-exit
            // check is formally a data race -- consider an atomic read.
            if (!errCount) // no need to parse further if already an error was encountered
            {
              try
              {
                doPopulateSpectraWithData_(spectrum_data_[i]);
              }
              catch (...)
              {
                #pragma omp critical(HandleException)
                ++errCount;
              }
            }
          }
          if (errCount != 0)
          {
            throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data.");
          }
        }

        // Append all spectra: when a consumer is registered, hand each
        // spectrum to it and additionally store it in exp_ only if the
        // AlwaysAppendData option is set; otherwise store directly in exp_.
        for (Size i = 0; i < spectrum_data_.size(); i++)
        {
          if (consumer_ != NULL)
          {
            consumer_->consumeSpectrum(spectrum_data_[i].spectrum);
            if (options_.getAlwaysAppendData())
            {
              exp_->addSpectrum(spectrum_data_[i].spectrum);
            }
          }
          else
          {
            exp_->addSpectrum(spectrum_data_[i].spectrum);
          }
        }

        // Delete batch
        spectrum_data_.clear();
      }

private:
      /// Not implemented
      MzXMLHandler();

      // Cached XMLCh transcriptions of frequently used attribute names,
      // filled once by initStaticMembers_().
      static const XMLCh* s_value_;
      static const XMLCh* s_count_;
      static const XMLCh* s_type_;
      static const XMLCh* s_name_;
      static const XMLCh* s_version_;
      static const XMLCh* s_filename_;
      static const XMLCh* s_filetype_;
      static const XMLCh* s_filesha1_;
      static const XMLCh* s_completiontime_;
      static const XMLCh* s_precision_;
      static const XMLCh* s_byteorder_;
      static const XMLCh* s_pairorder_;
      static const XMLCh* s_compressionType_;
      static const XMLCh* s_precursorintensity_;
      static const XMLCh* s_precursorcharge_;
      static const XMLCh* s_windowwideness_;
      static const XMLCh* s_mslevel_;
      static const XMLCh* s_peakscount_;
      static const XMLCh* s_polarity_;
      static const XMLCh* s_scantype_;
      static const XMLCh* s_retentiontime_;
      static const XMLCh* s_startmz_;
      static const XMLCh* s_endmz_;
      static const XMLCh* s_first_;
      static const
XMLCh* s_last_; static const XMLCh* s_phone_; static const XMLCh* s_email_; static const XMLCh* s_uri_; static const XMLCh* s_num_; static const XMLCh* s_intensitycutoff_; static const XMLCh* s_centroided_; static const XMLCh* s_deisotoped_; static const XMLCh* s_chargedeconvoluted_; // init all the static members, which is necessary because otherwise the undefined order will cause problems void initStaticMembers_() { static bool init(false); if (!init) { s_value_ = xercesc::XMLString::transcode("value"); s_count_ = xercesc::XMLString::transcode("scanCount"); s_type_ = xercesc::XMLString::transcode("type"); s_name_ = xercesc::XMLString::transcode("name"); s_version_ = xercesc::XMLString::transcode("version"); s_filename_ = xercesc::XMLString::transcode("fileName"); s_filetype_ = xercesc::XMLString::transcode("fileType"); s_filesha1_ = xercesc::XMLString::transcode("fileSha1"); s_completiontime_ = xercesc::XMLString::transcode("completionTime"); s_precision_ = xercesc::XMLString::transcode("precision"); s_byteorder_ = xercesc::XMLString::transcode("byteOrder"); s_pairorder_ = xercesc::XMLString::transcode("pairOrder"); s_compressionType_ = xercesc::XMLString::transcode("compressionType"); s_precursorintensity_ = xercesc::XMLString::transcode("precursorIntensity"); s_precursorcharge_ = xercesc::XMLString::transcode("precursorCharge"); s_windowwideness_ = xercesc::XMLString::transcode("windowWideness"); s_mslevel_ = xercesc::XMLString::transcode("msLevel"); s_peakscount_ = xercesc::XMLString::transcode("peaksCount"); s_polarity_ = xercesc::XMLString::transcode("polarity"); s_scantype_ = xercesc::XMLString::transcode("scanType"); s_retentiontime_ = xercesc::XMLString::transcode("retentionTime"); s_startmz_ = xercesc::XMLString::transcode("startMz"); s_endmz_ = xercesc::XMLString::transcode("endMz"); s_first_ = xercesc::XMLString::transcode("first"); s_last_ = xercesc::XMLString::transcode("last"); s_phone_ = xercesc::XMLString::transcode("phone"); s_email_ = 
xercesc::XMLString::transcode("email"); s_uri_ = xercesc::XMLString::transcode("URI"); s_num_ = xercesc::XMLString::transcode("num"); s_intensitycutoff_ = xercesc::XMLString::transcode("intensityCutoff"); s_centroided_ = xercesc::XMLString::transcode("centroided"); s_deisotoped_ = xercesc::XMLString::transcode("deisotoped"); s_chargedeconvoluted_ = xercesc::XMLString::transcode("chargeDeconvoluted"); init = true; } return; } }; //-------------------------------------------------------------------------------- // this cannot be moved into a function as VS2008 does not allow more than 31 static members in a function .. don't ask... template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_value_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_count_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_type_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_name_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_version_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filename_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filetype_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_filesha1_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_completiontime_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precision_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_byteorder_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_pairorder_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_compressionType_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precursorintensity_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_precursorcharge_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_windowwideness_ = 0; template <typename 
MapType> const XMLCh * MzXMLHandler<MapType>::s_mslevel_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_peakscount_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_polarity_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_scantype_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_retentiontime_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_startmz_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_endmz_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_first_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_last_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_phone_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_email_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_uri_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_num_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_intensitycutoff_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_centroided_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_deisotoped_ = 0; template <typename MapType> const XMLCh * MzXMLHandler<MapType>::s_chargedeconvoluted_ = 0; template <typename MapType> void MzXMLHandler<MapType>::startElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname, const xercesc::Attributes& attributes) { OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") static bool init_static_members(false); if (!init_static_members) { initStaticMembers_(); } String tag = sm_.convert(qname); open_tags_.push_back(tag); //std::cout << " -- Start -- "<< tag << " -- " << "\n"; //Skip all tags until the the next scan if (skip_spectrum_ && tag != "scan") return; if (tag == "msRun") { Int count = 0; 
optionalAttributeAsInt_(count, attributes, s_count_); exp_->reserve(count); logger_.startProgress(0, count, "loading mzXML file"); scan_count_ = 0; data_processing_.clear(); //start and end time are xs:duration. This makes no sense => ignore them } else if (tag == "parentFile") { SourceFile sf; sf.setNameOfFile(attributeAsString_(attributes, s_filename_)); sf.setFileType(attributeAsString_(attributes, s_filetype_)); sf.setChecksum(attributeAsString_(attributes, s_filesha1_), SourceFile::SHA1); exp_->getSourceFiles().push_back(sf); } else if (tag == "software") { String& parent_tag = *(open_tags_.end() - 2); if (parent_tag == "dataProcessing") { data_processing_.back().getSoftware().setVersion(attributeAsString_(attributes, s_version_)); data_processing_.back().getSoftware().setName(attributeAsString_(attributes, s_name_)); data_processing_.back().setMetaValue("#type", String(attributeAsString_(attributes, s_type_))); String time; optionalAttributeAsString_(time, attributes, s_completiontime_); data_processing_.back().setCompletionTime(asDateTime_(time)); } else if (parent_tag == "msInstrument") { exp_->getInstrument().getSoftware().setVersion(attributeAsString_(attributes, s_version_)); exp_->getInstrument().getSoftware().setName(attributeAsString_(attributes, s_name_)); } } else if (tag == "peaks") { //precision spectrum_data_.back().precision_ = "32"; optionalAttributeAsString_(spectrum_data_.back().precision_, attributes, s_precision_); if (spectrum_data_.back().precision_ != "32" && spectrum_data_.back().precision_ != "64") { error(LOAD, String("Invalid precision '") + spectrum_data_.back().precision_ + "' in element 'peaks'"); } //byte order String byte_order = "network"; optionalAttributeAsString_(byte_order, attributes, s_byteorder_); if (byte_order != "network") { error(LOAD, String("Invalid or missing byte order '") + byte_order + "' in element 'peaks'. 
Must be 'network'!"); } //pair order String pair_order = "m/z-int"; optionalAttributeAsString_(pair_order, attributes, s_pairorder_); if (pair_order != "m/z-int") { error(LOAD, String("Invalid or missing pair order '") + pair_order + "' in element 'peaks'. Must be 'm/z-int'!"); } //compressionType spectrum_data_.back().compressionType_ = "none"; optionalAttributeAsString_(spectrum_data_.back().compressionType_, attributes, s_compressionType_); if (spectrum_data_.back().compressionType_ != "none" && spectrum_data_.back().compressionType_ != "zlib") { error(LOAD, String("Invalid compression type ") + spectrum_data_.back().compressionType_ + "in elements 'peaks'. Must be 'none' or 'zlib'! "); } } else if (tag == "precursorMz") { //add new precursor spectrum_data_.back().spectrum.getPrecursors().push_back(Precursor()); //intensity try { spectrum_data_.back().spectrum.getPrecursors().back().setIntensity(attributeAsDouble_(attributes, s_precursorintensity_)); } catch (Exception::ParseError& /*e*/) { error(LOAD, "Mandatory attribute 'precursorIntensity' of tag 'precursorMz' not found! 
Setting precursor intensity to zero!"); } //charge Int charge = 0; if (optionalAttributeAsInt_(charge, attributes, s_precursorcharge_)) { spectrum_data_.back().spectrum.getPrecursors().back().setCharge(charge); } //window bounds (here only the width is stored in both fields - this is corrected when we parse the m/z position) DoubleReal window = 0.0; if (optionalAttributeAsDouble_(window, attributes, s_windowwideness_)) { spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(window); } } else if (tag == "scan") { skip_spectrum_ = false; nesting_level_++; if (options_.getMetadataOnly()) throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__); // check if the scan is in the desired MS / RT range UInt ms_level = attributeAsInt_(attributes, s_mslevel_); if (ms_level == 0) { warning(LOAD, String("Invalid 'msLevel' attribute with value '0' in 'scan' element found. Assuming ms level 1!")); ms_level = 1; } //parse retention time and convert it from xs:duration to seconds DoubleReal retention_time = 0.0; String time_string = ""; if (optionalAttributeAsString_(time_string, attributes, s_retentiontime_)) { time_string = time_string.suffix('T'); //std::cout << "Initial trim: " << time_string << "\n"; if (time_string.has('H')) { retention_time += 3600 * asDouble_(time_string.prefix('H')); time_string = time_string.suffix('H'); //std::cout << "After H: " << time_string << "\n"; } if (time_string.has('M')) { retention_time += 60 * asDouble_(time_string.prefix('M')); time_string = time_string.suffix('M'); //std::cout << "After M: " << time_string << "\n"; } if (time_string.has('S')) { retention_time += asDouble_(time_string.prefix('S')); time_string = time_string.suffix('S'); //std::cout << "After S: " << time_string << "\n"; } } logger_.setProgress(scan_count_); if ((options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(retention_time))) || (options_.hasMSLevels() && !options_.containsMSLevel(ms_level)) || 
options_.getSizeOnly()) { // skip this tag skip_spectrum_ = true; ++scan_count_; return; } // Add a new spectrum, initialize and set MS level and RT spectrum_data_.resize(spectrum_data_.size() + 1); // TODO !! spectrum_data_.back().peak_count_ = 0; spectrum_data_.back().spectrum.setMSLevel(ms_level); spectrum_data_.back().spectrum.setRT(retention_time); spectrum_data_.back().spectrum.setNativeID(String("scan=") + attributeAsString_(attributes, s_num_)); //peak count == twice the scan size spectrum_data_.back().peak_count_ = attributeAsInt_(attributes, s_peakscount_); spectrum_data_.back().spectrum.reserve(spectrum_data_.back().peak_count_ / 2 + 1); spectrum_data_.back().spectrum.setDataProcessing(data_processing_); //centroided, chargeDeconvoluted, deisotoped, collisionEnergy are ignored //other optional attributes ScanWindow window; optionalAttributeAsDouble_(window.begin, attributes, s_startmz_); optionalAttributeAsDouble_(window.end, attributes, s_endmz_); if (window.begin != 0.0 || window.end != 0.0) { spectrum_data_.back().spectrum.getInstrumentSettings().getScanWindows().push_back(window); } String polarity = "any"; optionalAttributeAsString_(polarity, attributes, s_polarity_); spectrum_data_.back().spectrum.getInstrumentSettings().setPolarity((IonSource::Polarity) cvStringToEnum_(0, polarity, "polarity")); String type = ""; optionalAttributeAsString_(type, attributes, s_scantype_); if (type == "") { //unknown/unset => do nothing here => no warning in the end } else if (type == "zoom") { spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true); spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "Full") { if (ms_level > 1) spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM); else spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "SIM") { 
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SIM); } else if (type == "SRM" || type == "MRM") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SRM); } else if (type == "CRM") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::CRM); } else if (type == "Q1") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "Q3") { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "EMS") //Non-standard type: Enhanced MS (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (type == "EPI") //Non-standard type: Enhanced Product Ion (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); spectrum_data_.back().spectrum.setMSLevel(2); } else if (type == "ER") // Non-standard type: Enhanced Resolution (ABI - Sashimi converter) { spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true); spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else { spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); warning(LOAD, String("Unknown scan mode '") + type + "'. 
Assuming full scan"); } ++scan_count_; } else if (tag == "operator") { exp_->getContacts().resize(1); exp_->getContacts().back().setFirstName(attributeAsString_(attributes, s_first_)); exp_->getContacts().back().setLastName(attributeAsString_(attributes, s_last_)); String tmp = ""; optionalAttributeAsString_(tmp, attributes, s_email_); exp_->getContacts().back().setEmail(tmp); tmp = ""; optionalAttributeAsString_(tmp, attributes, s_phone_); if (tmp != "") { exp_->getContacts().back().setMetaValue("#phone", tmp); } tmp = ""; optionalAttributeAsString_(tmp, attributes, s_uri_); exp_->getContacts().back().setURL(tmp); } else if (tag == "msManufacturer") { exp_->getInstrument().setVendor(attributeAsString_(attributes, s_value_)); } else if (tag == "msModel") { exp_->getInstrument().setModel(attributeAsString_(attributes, s_value_)); } else if (tag == "msIonisation") { exp_->getInstrument().getIonSources().resize(1); exp_->getInstrument().getIonSources()[0].setIonizationMethod((IonSource::IonizationMethod) cvStringToEnum_(2, attributeAsString_(attributes, s_value_), "msIonization")); } else if (tag == "msMassAnalyzer") { exp_->getInstrument().getMassAnalyzers().resize(1); exp_->getInstrument().getMassAnalyzers()[0].setType((MassAnalyzer::AnalyzerType) cvStringToEnum_(3, attributeAsString_(attributes, s_value_), "msMassAnalyzer")); } else if (tag == "msDetector") { exp_->getInstrument().getIonDetectors().resize(1); exp_->getInstrument().getIonDetectors()[0].setType((IonDetector::Type) cvStringToEnum_(4, attributeAsString_(attributes, s_value_), "msDetector")); } else if (tag == "msResolution") { exp_->getInstrument().getMassAnalyzers()[0].setResolutionMethod((MassAnalyzer::ResolutionMethod) cvStringToEnum_(5, attributeAsString_(attributes, s_value_), "msResolution")); } else if (tag == "dataProcessing") { data_processing_.push_back(DataProcessing()); String boolean = ""; optionalAttributeAsString_(boolean, attributes, s_deisotoped_); if (boolean == "true" || boolean == 
"1") { data_processing_.back().getProcessingActions().insert(DataProcessing::DEISOTOPING); } boolean = ""; optionalAttributeAsString_(boolean, attributes, s_chargedeconvoluted_); if (boolean == "true" || boolean == "1") { data_processing_.back().getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION); } DoubleReal cutoff = 0.0; optionalAttributeAsDouble_(cutoff, attributes, s_intensitycutoff_); if (cutoff != 0.0) { data_processing_.back().setMetaValue("#intensity_cutoff", cutoff); } boolean = ""; optionalAttributeAsString_(boolean, attributes, s_centroided_); if (boolean == "true" || boolean == "1") { data_processing_.back().getProcessingActions().insert(DataProcessing::PEAK_PICKING); } } else if (tag == "nameValue") { String name = ""; optionalAttributeAsString_(name, attributes, s_name_); if (name == "") return; String value = ""; optionalAttributeAsString_(value, attributes, s_value_); String& parent_tag = *(open_tags_.end() - 2); if (parent_tag == "msInstrument") { exp_->getInstrument().setMetaValue(name, value); } else if (parent_tag == "scan") { spectrum_data_.back().spectrum.setMetaValue(name, value); } else { std::cout << " Warning: Unexpected tag 'nameValue' in tag '" << parent_tag << "'" << "\n"; } } else if (tag == "processingOperation") { String name = ""; optionalAttributeAsString_(name, attributes, s_name_); if (name == "") return; String value = ""; optionalAttributeAsString_(value, attributes, s_value_); data_processing_.back().setMetaValue(name, value); } //std::cout << " -- !Start -- " << "\n"; } template <typename MapType> void MzXMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname) { OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") //std::cout << " -- End -- " << sm_.convert(qname) << " -- " << "\n"; static const XMLCh* s_mzxml = xercesc::XMLString::transcode("mzXML"); static const XMLCh* s_scan = 
xercesc::XMLString::transcode("scan"); open_tags_.pop_back(); if (equal_(qname, s_mzxml)) { // Flush the remaining data populateSpectraWithData_(); // End of mzXML logger_.endProgress(); } else if (equal_(qname, s_scan)) { // End of scan: go up one nesting level // Check whether to populate spectra when on highest nesting level nesting_level_--; OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more") if (nesting_level_ == 0 && spectrum_data_.size() >= options_.getMaxDataPoolSize()) { populateSpectraWithData_(); } } //std::cout << " -- End -- " << "\n"; sm_.clear(); } template <typename MapType> void MzXMLHandler<MapType>::characters(const XMLCh* const chars, const XMLSize_t length) { //Abort if this spectrum should be skipped if (skip_spectrum_) return; if (open_tags_.back() == "peaks") { //chars may be split to several chunks => concatenate them if (options_.getFillData()) { // Since we convert a Base64 string here, it can only contain plain ASCII sm_.appendASCII(chars, length, spectrum_data_.back().char_rest_); } } else if (open_tags_.back() == "offset" || open_tags_.back() == "indexOffset" || open_tags_.back() == "sha1") { } else if (open_tags_.back() == "precursorMz") { char* transcoded_chars = sm_.convert(chars); DoubleReal mz_pos = asDouble_(transcoded_chars); //precursor m/z spectrum_data_.back().spectrum.getPrecursors().back().setMZ(mz_pos); //update window bounds - center them around the m/z pos DoubleReal window_width = spectrum_data_.back().spectrum.getPrecursors().back().getIsolationWindowLowerOffset(); if (window_width != 0.0) { spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(0.5 * window_width); spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowUpperOffset(0.5 * window_width); } } else if (open_tags_.back() == "comment") { char* transcoded_chars = sm_.convert(chars); String parent_tag = *(open_tags_.end() - 2); //std::cout << "- Comment of parent " << parent_tag << 
"\n"; if (parent_tag == "msInstrument") { exp_->getInstrument().setMetaValue("#comment", String(transcoded_chars)); } else if (parent_tag == "dataProcessing") { //this is currently ignored } else if (parent_tag == "scan") { spectrum_data_.back().spectrum.setComment(transcoded_chars); } else if (String(transcoded_chars).trim() != "") { warning(LOAD, String("Unhandled comment '") + transcoded_chars + "' in element '" + open_tags_.back() + "'"); } } else { char* transcoded_chars = sm_.convert(chars); if (String(transcoded_chars).trim() != "") { warning(LOAD, String("Unhandled character content '") + transcoded_chars + "' in element '" + open_tags_.back() + "'"); } } } template <typename MapType> void MzXMLHandler<MapType>::writeTo(std::ostream& os) { //determine how many spectra there are (count only those with peaks) UInt count_tmp_ = 0; for (Size s = 0; s < cexp_->size(); s++) { const SpectrumType& spec = (*cexp_)[s]; if (spec.size() != 0) ++count_tmp_; } if (count_tmp_ == 0) ++count_tmp_; logger_.startProgress(0, cexp_->size(), "storing mzXML file"); os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n" << "<mzXML xmlns=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1\" " << "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" " << "xsi:schemaLocation=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1 " << "http://sashimi.sourceforge.net/schema_revision/mzXML_2.1/mzXML_idx_2.1.xsd\">\n" << "\t<msRun scanCount=\"" << count_tmp_ << "\">\n"; //---------------------------------------------------------------------------------------- // parent files //---------------------------------------------------------------------------------------- if (cexp_->getSourceFiles().empty()) { os << "\t\t<parentFile fileName=\"\" fileType=\"processedData\" fileSha1=\"0000000000000000000000000000000000000000\"/>\n"; } else { for (Size i = 0; i < cexp_->getSourceFiles().size(); ++i) { const SourceFile& sf = cexp_->getSourceFiles()[i]; os << "\t\t<parentFile 
fileName=\"" << sf.getNameOfFile() << "\" fileType=\""; //file type is an enum in mzXML => search for 'raw' string String tmp_string = sf.getFileType(); tmp_string.toLower(); if (tmp_string.hasSubstring("raw")) { os << "RAWData"; } else { os << "processedData"; } //Sha1 checksum must have 40 characters => create a fake if it is unknown os << "\" fileSha1=\""; tmp_string = sf.getChecksum(); if (sf.getChecksum().size() != 40 || sf.getChecksumType() != SourceFile::SHA1) { os << "0000000000000000000000000000000000000000"; } else { os << sf.getChecksum(); } os << "\"/>\n"; } } //---------------------------------------------------------------------------------------- //instrument //---------------------------------------------------------------------------------------- if (cexp_->getInstrument() != Instrument() || cexp_->getContacts().size() != 0) { const Instrument& inst = cexp_->getInstrument(); os << "\t\t<msInstrument>\n" << "\t\t\t<msManufacturer category=\"msManufacturer\" value=\"" << inst.getVendor() << "\"/>\n" << "\t\t\t<msModel category=\"msModel\" value=\"" << inst.getModel() << "\"/>\n"; if (inst.getIonSources().empty() || !inst.getIonSources()[0].getIonizationMethod()) { os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"\"/>\n"; } else { os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"" << cv_terms_[2][inst.getIonSources()[0].getIonizationMethod()] << "\"/>\n"; } const std::vector<MassAnalyzer>& analyzers = inst.getMassAnalyzers(); if (analyzers.empty() || !analyzers[0].getResolutionMethod()) { os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"\"/>\n"; } else { os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"" << cv_terms_[3][analyzers[0].getType()] << "\"/>\n"; } if (inst.getIonDetectors().empty() || !inst.getIonDetectors()[0].getType()) { os << "\t\t\t<msDetector category=\"msDetector\" value=\"\"/>\n"; } else { os << "\t\t\t<msDetector category=\"msDetector\" value=\"" << 
cv_terms_[4][inst.getIonDetectors()[0].getType()] << "\"/>\n"; } os << "\t\t\t<software type=\"acquisition\" name=\"" << inst.getSoftware().getName() << "\" version=\"" << inst.getSoftware().getVersion() << "\"/>\n"; if (analyzers.empty() || !analyzers[0].getResolutionMethod()) { os << "\t\t\t<msResolution category=\"msResolution\" value=\"\"/>\n"; } else { os << "\t\t\t<msResolution category=\"msResolution\" value=\"" << cv_terms_[5][analyzers[0].getResolutionMethod()] << "\"/>\n"; } if (cexp_->getContacts().size() > 0) { const ContactPerson& cont = cexp_->getContacts()[0]; os << "\t\t\t<operator first=\"" << cont.getFirstName() << "\" last=\"" << cont.getLastName() << "\""; if (cont.getEmail() != "") { os << " email=\"" << cont.getEmail() << "\""; } if (cont.getURL() != "") { os << " URI=\"" << cont.getURL() << "\""; } if (cont.metaValueExists("#phone")) { os << " phone=\"" << (String)(cont.getMetaValue("#phone")) << "\""; } os << "/>\n"; } writeUserParam_(os, inst, 3); if (inst.metaValueExists("#comment")) { os << "\t\t\t<comment>" << inst.getMetaValue("#comment") << "</comment>\n"; } os << "\t\t</msInstrument>\n"; } //---------------------------------------------------------------------------------------- //data processing (the information of the first spectrum is assigned to the whole file) //---------------------------------------------------------------------------------------- if (cexp_->size() == 0 || (*cexp_)[0].getDataProcessing().empty()) { os << "\t\t<dataProcessing>\n" << "\t\t\t<software type=\"processing\" name=\"\" version=\"\"/>\n" << "\t\t</dataProcessing>\n"; } else { for (Size i = 0; i < (*cexp_)[0].getDataProcessing().size(); ++i) { const DataProcessing& data_processing = (*cexp_)[0].getDataProcessing()[i]; os << "\t\t<dataProcessing deisotoped=\"" << data_processing.getProcessingActions().count(DataProcessing::DEISOTOPING) << "\" chargeDeconvoluted=\"" << data_processing.getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION) << 
"\" centroided=\"" << data_processing.getProcessingActions().count(DataProcessing::PEAK_PICKING) << "\""; if (data_processing.metaValueExists("#intensity_cutoff")) { os << " intensityCutoff=\"" << data_processing.getMetaValue("#intensity_cutoff").toString() << "\""; } os << ">\n" << "\t\t\t<software type=\""; if (data_processing.metaValueExists("#type")) { os << data_processing.getMetaValue("#type").toString(); } else { os << "processing"; } os << "\" name=\"" << data_processing.getSoftware().getName() << "\" version=\"" << data_processing.getSoftware().getVersion(); if (data_processing.getCompletionTime() != DateTime()) { os << "\" completionTime=\"" << data_processing.getCompletionTime().get().substitute(' ', 'T'); } os << "\"/>\n"; writeUserParam_(os, data_processing, 3, "processingOperation"); os << "\t\t</dataProcessing>\n"; } } //check if the nativeID of all spectra are numbers or numbers prefixed with 'scan=' //If not we need to renumber all spectra. bool all_numbers = true; bool all_empty = true; bool all_prefixed_numbers = true; for (Size s = 0; s < cexp_->size(); s++) { String native_id = (*cexp_)[s].getNativeID(); if (!native_id.hasPrefix("scan=")) { all_prefixed_numbers = false; } else { native_id = native_id.substr(5); } try { native_id.toInt(); } catch (Exception::ConversionError&) { all_numbers = false; all_prefixed_numbers = false; if (native_id != "") { all_empty = false; } } } //If we need to renumber and the nativeIDs were not empty, warn the user if (!all_numbers && !all_empty) { warning(STORE, "Not all spectrum native IDs are numbers or correctly prefixed with 'scan='. 
The spectra are renumbered and the native IDs are lost!"); } // write scans std::stack<UInt> open_scans; for (Size s = 0; s < cexp_->size(); s++) { logger_.setProgress(s); const SpectrumType& spec = (*cexp_)[s]; UInt ms_level = spec.getMSLevel(); open_scans.push(ms_level); Size spectrum_id = s + 1; if (all_prefixed_numbers) { spectrum_id = spec.getNativeID().substr(5).toInt(); } else if (all_numbers) { spectrum_id = spec.getNativeID().toInt(); } os << String(ms_level + 1, '\t') << "<scan num=\"" << spectrum_id << "\" msLevel=\"" << ms_level << "\" peaksCount=\"" << spec.size() << "\" polarity=\""; if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE) { os << "+"; } else if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE) { os << "-"; } else { os << "any"; } //scan type switch (spec.getInstrumentSettings().getScanMode()) { case InstrumentSettings::UNKNOWN: break; case InstrumentSettings::MASSSPECTRUM: case InstrumentSettings::MS1SPECTRUM: case InstrumentSettings::MSNSPECTRUM: if (spec.getInstrumentSettings().getZoomScan()) { os << "\" scanType=\"zoom"; } else { os << "\" scanType=\"Full"; } break; case InstrumentSettings::SIM: os << "\" scanType=\"SIM"; break; case InstrumentSettings::SRM: os << "\" scanType=\"SRM"; break; case InstrumentSettings::CRM: os << "\" scanType=\"CRM"; break; default: os << "\" scanType=\"Full"; warning(STORE, String("Scan type '") + InstrumentSettings::NamesOfScanMode[spec.getInstrumentSettings().getScanMode()] + "' not supported by mzXML. 
Using 'Full' scan mode!"); } os << "\" retentionTime=\""; if (spec.getRT() < 0) os << "-"; os << "PT" << std::fabs(spec.getRT()) << "S\""; if (!spec.getInstrumentSettings().getScanWindows().empty()) { os << " startMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].begin << "\" endMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].end << "\""; } if (spec.getInstrumentSettings().getScanWindows().size() > 1) { warning(STORE, "The MzXML format can store only one scan window for each scan. Only the first one is stored!"); } os << ">\n"; for (Size i = 0; i < spec.getPrecursors().size(); ++i) { const Precursor& precursor = spec.getPrecursors()[i]; //intensity os << String(ms_level + 2, '\t') << "<precursorMz precursorIntensity=\"" << precursor.getIntensity(); //charge if (precursor.getCharge() != 0) os << "\" precursorCharge=\"" << precursor.getCharge(); //window size if (precursor.getIsolationWindowLowerOffset() + precursor.getIsolationWindowUpperOffset() > 0.0) os << "\" windowWideness=\"" << (precursor.getIsolationWindowUpperOffset() + precursor.getIsolationWindowLowerOffset()); //m/z os << "\">" << precursor.getMZ() << "</precursorMz>\n"; } if (!spec.empty()) { os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\">"; //std::cout << "Writing scan " << s << "\n"; std::vector<Real> tmp; for (Size i = 0; i < spec.size(); i++) { tmp.push_back(spec[i].getMZ()); tmp.push_back(spec[i].getIntensity()); } String encoded; decoder_.encode(tmp, Base64::BYTEORDER_BIGENDIAN, encoded); os << encoded << "</peaks>\n"; } else { os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\" xsi:nil=\"true\"/>\n"; } writeUserParam_(os, spec, ms_level + 2); if (spec.getComment() != "") { os << String(ms_level + 2, '\t') << "<comment>" << spec.getComment() << "</comment>\n"; } //check MS level of next scan and close scans (scans can be nested) UInt next_ms_level = 0; if 
(s < cexp_->size() - 1) { next_ms_level = ((*cexp_)[s + 1]).getMSLevel(); } //std::cout << "scan: " << s << " this: " << ms_level << " next: " << next_ms_level << "\n"; if (next_ms_level <= ms_level) { for (Size i = 0; i <= ms_level - next_ms_level && !open_scans.empty(); ++i) { os << String(ms_level - i + 1, '\t') << "</scan>\n"; open_scans.pop(); } } } os << "\t</msRun>\n" << "\t<indexOffset>0</indexOffset>\n" << "</mzXML>\n"; logger_.endProgress(); spec_write_counter_ = 1; } } // namespace Internal } // namespace OpenMS #endif
GB_unaryop__minv_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int64_uint8
// op(A') function:  GB_tran__minv_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 64)

// type of the A matrix entries
#define GB_ATYPE \
    uint8_t

// type of the C matrix entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access the pC-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: z = minv(x), the 64-bit signed integer multiplicative
// inverse (semantics defined by GB_IMINV_SIGNED in GB.h)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting: widen the uint8_t input to the int64_t output type
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij)): one fused load / cast / apply / store step
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to all anz entries, in parallel.  Returns
// GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE), which tells the
// caller to fall back to the generic worker.
GrB_Info GB_unop__minv_int64_uint8
(
    int64_t *Cx,        // Cx and Ax may be aliased (each entry is read once
                        // then overwritten, so in-place operation is safe)
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, which is
// instantiated here via #include with the macros above defining the types
// and the operator.
GrB_Info GB_tran__minv_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lsh_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ /*********************************************************************** * Author: Vincent Rabaud *************************************************************************/ #ifndef FLANN_LSH_INDEX_H_ #define FLANN_LSH_INDEX_H_ #include <algorithm> #include <cassert> #include <cstring> #include <map> #include <vector> #include "flann/general.h" #include "flann/algorithms/nn_index.h" #include "flann/util/matrix.h" #include "flann/util/result_set.h" #include "flann/util/heap.h" #include "flann/util/lsh_table.h" #include "flann/util/allocator.h" #include "flann/util/random.h" #include "flann/util/saving.h" namespace flann { struct LshIndexParams : public IndexParams { LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) { (* this)["algorithm"] = FLANN_INDEX_LSH; // The number of hash tables to use (*this)["table_number"] = table_number; // The length of the key in the hash tables (*this)["key_size"] = key_size; // Number of levels to use in multi-probe (0 for standard LSH) (*this)["multi_probe_level"] = multi_probe_level; } }; /** * Randomized kd-tree index * * Contains the k-d trees and other information for indexing a set of points * for nearest-neighbor matching. 
*/ template<typename Distance> class LshIndex : public NNIndex<Distance> { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; typedef NNIndex<Distance> BaseClass; /** Constructor * @param params parameters passed to the LSH algorithm * @param d the distance used */ LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance()) : BaseClass(params, d) { table_number_ = get_param<unsigned int>(index_params_,"table_number",12); key_size_ = get_param<unsigned int>(index_params_,"key_size",20); multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2); fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); } /** Constructor * @param input_data dataset with the input features * @param params parameters passed to the LSH algorithm * @param d the distance used */ LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance()) : BaseClass(params, d) { table_number_ = get_param<unsigned int>(index_params_,"table_number",12); key_size_ = get_param<unsigned int>(index_params_,"key_size",20); multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2); fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); setDataset(input_data); } LshIndex(const LshIndex& other) : BaseClass(other), tables_(other.tables_), table_number_(other.table_number_), key_size_(other.key_size_), multi_probe_level_(other.multi_probe_level_), xor_masks_(other.xor_masks_) { } LshIndex& operator=(LshIndex other) { this->swap(other); return *this; } virtual ~LshIndex() { freeIndex(); } BaseClass* clone() const { return new LshIndex(*this); } using BaseClass::buildIndex; void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { assert(points.cols==veclen_); size_t old_size = size_; extendDataset(points); if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) { buildIndex(); } else { for 
(unsigned int i = 0; i < table_number_; ++i) { lsh::LshTable<ElementType>& table = tables_[i]; for (size_t i=old_size;i<size_;++i) { table.add(i, points_[i]); } } } } flann_algorithm_t getType() const { return FLANN_INDEX_LSH; } template<typename Archive> void serialize(Archive& ar) { ar.setObject(this); ar & *static_cast<NNIndex<Distance>*>(this); ar & table_number_; ar & key_size_; ar & multi_probe_level_; ar & xor_masks_; ar & tables_; if (Archive::is_loading::value) { index_params_["algorithm"] = getType(); index_params_["table_number"] = table_number_; index_params_["key_size"] = key_size_; index_params_["multi_probe_level"] = multi_probe_level_; } } void saveIndex(FILE* stream) { serialization::SaveArchive sa(stream); sa & *this; } void loadIndex(FILE* stream) { serialization::LoadArchive la(stream); la & *this; } /** * Computes the index memory usage * Returns: memory used by the index */ int usedMemory() const { return size_ * sizeof(int); } /** * \brief Perform k-nearest neighbor search * \param[in] queries The query points for which to find the nearest neighbors * \param[out] indices The indices of the nearest neighbors found * \param[out] dists Distances to the nearest neighbors found * \param[in] knn Number of nearest neighbors to return * \param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen_); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); int count = 0; if (params.use_heap==FLANN_True) { #pragma omp parallel num_threads(params.cores) { KNNUniqueResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); 
// NOTE(review): this span begins mid-way through the non-heap branch of the
// Matrix-based knnSearch overload; its opening lines are outside this view.
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                // One non-unique result set per thread; cleared before each query.
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    // Result set may hold fewer than knn neighbors; copy only what exists.
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    // Translate internal point indices into user-visible ids in place.
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     *
     * Vector-of-vectors variant: per-query output rows are resized to the
     * number of neighbors actually found.  Returns the total neighbor count
     * accumulated over all queries (OpenMP reduction).
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        // Grow (never shrink) the output containers to hold one row per query.
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                // Heap-based set deduplicates candidate indices across probes.
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    // Guard: &vec[0] on an empty vector is undefined behavior.
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
     *
     * NOTE(review): searchParams is deliberately unused here — LSH probing is
     * governed by the multi-probe masks built at index time, not by maxCheck.
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:

    /**
     * Builds the index: creates table_number_ hash tables and inserts every
     * stored point (paired with its index) into each of them.
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t,ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i=0;i<points_.size();++i) {
            features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            // Re-seed each table with fresh random hash functions of key_size_ bits.
            table = lsh::LshTable<ElementType>(veclen_, key_size_);
            // Add the features to the table
            table.add(features);
        }
    }

    // All owned containers free themselves; no manual cleanup required.
    void freeIndex()
    {
        /* nothing to do here */
    }

private:
    /** Defines the comparator on score and index */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        // Orders by the stored point index (second), not by score — matches the name.
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     *
     * Recursive enumeration: each mask at this level spawns masks with one
     * additional bit set below lowest_index, down to level == 0.
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, std::vector<lsh::BucketKey>& xor_masks)
    {
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key
            lsh::BucketKey new_key = key | (1 << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * @param vec the feature to analyze
     * @param do_radius flag indicating if we check the radius too
     * @param radius the radius if it is a radius search
     * @param do_k flag indicating if we limit the number of nn
     * @param k_nn the number of nearest neighbors
     * @param checked_average used for debugging
     *
     * NOTE(review): do_radius and checked_average are never read in this body;
     * the branch is chosen solely by do_k.  The `static` scratch heap makes
     * this function non-reentrant and not thread-safe, and it is never
     * cleared between calls — confirm this overload is actually unused before
     * relying on it.
     */
    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn, float& checked_average)
    {
        static std::vector<ScoreIndexPair> score_index_heap;

        if (do_k) {
            // Track the current k-th best score to prune candidate insertions.
            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    // Multi-probe: each mask flips bits of the key to visit a neighboring bucket.
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;
                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;
                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        // Skip points flagged as removed from the index.
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        hamming_distance = distance_(vec, points_[*training_index].point, veclen_);
                        if (hamming_distance < worst_score) {
                            // Insert the new element
                            // NOTE(review): training_index is an iterator; the pair's
                            // second is unsigned int — this looks like it should be
                            // *training_index.  Verify against upstream FLANN.
                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
                            std::push_heap(score_index_heap.begin(), score_index_heap.end());
                            if (score_index_heap.size() > (unsigned int)k_nn) {
                                // Remove the highest distance value as we have too many elements
                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
                                score_index_heap.pop_back();
                                // Keep track of the worst score
                                worst_score = score_index_heap.front().first;
                            }
                        }
                    }
                }
            }
        }
        else {
            // Radius search: collect every candidate closer than radius, unbounded count.
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;
                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;
                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Compute the Hamming distance
                        hamming_distance = distance_(vec, points_[*training_index].point, veclen_);
                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
                    }
                }
            }
        }
    }

    /** Performs the approximate nearest-neighbor search.
 * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                // Multi-probe: visit the bucket whose key differs by the mask's bits.
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;
                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;
                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                    // Skip points flagged as removed from the index.
                    if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    // NOTE(review): this overload indexes points_[*training_index]
                    // directly, while the other getNeighbors uses
                    // points_[...].point — confirm which element type points_
                    // actually holds; the two overloads disagree.
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    // ResultSet decides whether to keep the candidate (k-limit, radius, ...).
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    /** Exchanges the complete state of two LshIndex instances (member-wise swap). */
    void swap(LshIndex& other)
    {
        BaseClass::swap(other);
        std::swap(tables_, other.tables_);
        // size_at_build_ is presumably declared in BaseClass — not visible here.
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(table_number_, other.table_number_);
        std::swap(key_size_, other.key_size_);
        std::swap(multi_probe_level_, other.multi_probe_level_);
        std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;
    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;
    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};
}

#endif //FLANN_LSH_INDEX_H_
ncpdq.c
/* $Header$ */ /* ncpdq -- netCDF pack, re-dimension, query */ /* Purpose: Pack, re-dimension, query single netCDF file and output to a single file */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* Usage: ncpdq -O -D 3 -a lat,lev,lon -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -a lon,lev,lat -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -a lon,time -x -v three_double_dmn ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -P all_new ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P all_xst ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P xst_new ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -M dbl_flt ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -M flt_dbl ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P upk ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -a lon,lat -g g21,g22 ~/nco/data/in_grp_3.nc ~/foo.nc ncpdq -O -D 3 -g g1 -v v1 --union -G dude -p ~/nco/data in_grp.nc ~/foo.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include 
<math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. */ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <time.h> /* machine time */ #ifndef _MSC_VER # include <unistd.h> /* POSIX stuff */ #endif #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ #ifdef I18N # include <langinfo.h> /* nl_langinfo() */ # include <libintl.h> /* Internationalization i18n */ # include <locale.h> /* Locale setlocale() */ # define _(sng) gettext (sng) # define gettext_noop(sng) (sng) # define N_(sng) gettext_noop(sng) #endif /* I18N */ /* Supply stub gettext() function in case i18n failed */ #ifndef _LIBINTL_H # define gettext(foo) foo #endif /* _LIBINTL_H */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include <netcdf_par.h> /* Parallel netCDF definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #include "libnco.h" /* netCDF Operator (NCO) library */ int main(int argc,char **argv) { aed_sct *aed_lst_add_fst=NULL_CEWI; aed_sct *aed_lst_scl_fct=NULL_CEWI; char **dmn_rdr_lst_in=NULL_CEWI; /* Option a */ char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in=NULL_CEWI; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **var_lst_in=NULL_CEWI; char **grp_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *nco_pck_plc_sng=NULL_CEWI; 
/* [sng] Packing policy Option P */ char *nco_pck_map_sng=NULL_CEWI; /* [sng] Packing map Option M */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl; /* [sng] Local copy of system optarg */ char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char add_fst_sng[]="add_offset"; /* [sng] Unidata standard string for add offset */ char scl_fct_sng[]="scale_factor"; /* [sng] Unidata standard string for scale factor */ char trv_pth[]="/"; /* [sng] Root path of traversal tree */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567Aa:CcD:d:Fg:G:hL:l:M:Oo:P:p:Rrt:v:UxZ-:"; cnk_sct cnk; /* [sct] Chunking structure */ cnv_sct *cnv; /* [sct] Convention structure */ #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct ddra_info={.flg_ddra=False}; #endif /* !__cplusplus */ dmn_sct **dmn_rdr_trv=NULL; /* [sct] Dimension structures to be re-ordered (from global table) */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */ int *in_id_arr; int abb_arg_nbr=0; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_rdr_nbr=0; /* [nbr] Number of dimension to re-order */ int dmn_rdr_nbr_trv=0; /* [nbr] Number of dimension to re-order (from global 
table) */ int dmn_rdr_nbr_in=0; /* [nbr] Original number of dimension to re-order */ int fl_idx=int_CEWI; int fl_nbr=0; int fl_in_fmt; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int idx=int_CEWI; int idx_rdr=int_CEWI; int in_id; int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int nco_pck_map=nco_pck_map_flt_sht; /* [enm] Packing map */ int nco_pck_plc=nco_pck_plc_nil; /* [enm] Packing policy */ int opt; int out_id; int ppc_nbr=0; /* [nbr] Number of PPC arguments */ int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ int var_lst_in_nbr=0; int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */ md5_sct *md5=NULL; /* [sct] MD5 configuration */ nco_bool *dmn_rvr_rdr=NULL; /* [flg] Reverse dimensions */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ nco_bool FL_RTR_RMT_LCN; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */ nco_bool HISTORY_APPEND=True; 
/* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool IS_REORDER=False; /* Re-order mode */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc; var_sct **var_prc_out; trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */ nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */ #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures 
variables */ {"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */ {"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"help",no_argument,0,0}, {"hlp",no_argument,0,0}, {"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */ {"mrd",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */ {"multiple_record_dimension",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */ {"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) 
file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"intersection",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */ {"nsx",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */ {"union",no_argument,0,0}, /* [flg] Select union of specified groups and variables */ {"unn",no_argument,0,0}, /* [flg] Select union of specified groups and variables */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ 
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"upk",required_argument,0,0}, /* [enm] Unpacking convention to utilize */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"arrange",required_argument,0,'a'}, {"permute",required_argument,0,'a'}, {"reorder",required_argument,0,'a'}, {"rdr",required_argument,0,'a'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, 
{"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"gpe",required_argument,0,'G'}, /* [sng] Group Path Edit (GPE) */ {"grp",required_argument,0,'g'}, {"group",required_argument,0,'g'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"pack_map",required_argument,0,'M'}, {"pck_map",required_argument,0,'M'}, {"map",required_argument,0,'M'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"pack_policy",required_argument,0,'P'}, {"pck_plc",required_argument,0,'P'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"unpack",no_argument,0,'U'}, {"variable",required_argument,0,'v'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ /* Initialize traversal table */ trv_tbl_init(&trv_tbl); /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); #ifdef ENABLE_MPI /* MPI Initialization */ if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm); MPI_Init(&argc,&argv); MPI_Comm_size(mpi_cmm,&prc_nbr); MPI_Comm_rank(mpi_cmm,&prc_rnk); #endif /* !ENABLE_MPI */ /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix 
long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){ cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_csh_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cll_msr") || 
!strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */ if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */ if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); } /* endif "help" */ if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ 
if(!strcmp(opt_crr,"mrd") || !strcmp(opt_crr,"multiple_record_dimension")) nco_mrd_cnv=nco_mrd_allow; /* [enm] Multiple Record Dimension convention */ if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){ ppc_arg[ppc_nbr]=(char *)strdup(optarg); ppc_nbr++; } /* endif "ppc" */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"unn") || !strcmp(opt_crr,"union")) GRP_VAR_UNN=True; if(!strcmp(opt_crr,"nsx") || !strcmp(opt_crr,"intersection")) GRP_VAR_UNN=False; if(!strcmp(opt_crr,"upk")){ /* [enm] Unpacking convention to utilize */ nco_upk_cnv=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ 
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'a': /* Re-order dimensions */ flg_dmn_prc_usr_spc=True; dmn_rdr_lst_in=nco_lst_prs_2D(optarg,",",&dmn_rdr_nbr_in); dmn_rdr_nbr=dmn_rdr_nbr_in; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). 
*/ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'G': /* Apply Group Path Editing (GPE) to output group */ /* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */ gpe=nco_gpe_prs_arg(optarg); fl_out_fmt=NC_FORMAT_NETCDF4; break; case 'g': /* Copy group argument for later processing */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'M': /* Packing map */ nco_pck_map_sng=(char *)strdup(optarg); nco_pck_map=nco_pck_map_get(nco_pck_map_sng); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'P': /* Packing policy */ nco_pck_plc_sng=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. 
*/ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'U': /* Unpacking switch */ nco_pck_plc_sng=(char *)strdup("upk"); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'X': /* Copy auxiliary coordinate argument for later processing */ aux_arg[aux_nbr]=(char *)strdup(optarg); aux_nbr++; MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */ (void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Set/report global chunk cache */ rcd+=nco_cnk_csh_ini(cnk_csh_byt); /* Set re-order flag */ if(dmn_rdr_nbr > 0) IS_REORDER=True; /* No re-order dimensions specified implies packing request */ if(dmn_rdr_nbr == 0){ if(nco_pck_plc == nco_pck_plc_nil) nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: DEBUG Packing map is %s and packing policy is %s\n",nco_prg_nm_get(),nco_pck_map_sng_get(nco_pck_map),nco_pck_plc_sng_get(nco_pck_plc)); } /* dmn_rdr_nbr != 0 */ /* From this point forward, assume ncpdq operator packs or re-orders, not both */ if(dmn_rdr_nbr > 0 && nco_pck_plc != nco_pck_plc_nil){ (void)fprintf(fp_stdout,"%s: ERROR %s does not support simultaneous dimension re-ordering (-a switch) and packing (-P switch).\nHINT: Invoke %s twice, once to re-order (with -a), and once to pack (with -P).\n",nco_prg_nm,nco_prg_nm,nco_prg_nm); nco_exit(EXIT_FAILURE); } /* endif */ /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Get file format */ (void)nco_inq_format(in_id,&fl_in_fmt); /* Construct GTT, Group Traversal Table 
(groups,variables,dimensions, limits) */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl); /* Were all user-specified dimensions found? */ (void)nco_chk_dmn(lmt_nbr,flg_dne); /* Create reversed dimension list */ if(dmn_rdr_nbr_in > 0){ dmn_rvr_rdr=(nco_bool *)nco_malloc(dmn_rdr_nbr_in*sizeof(nco_bool)); /* Is dimension to be reversed? i.e., does string begin with minus-sign '-'? */ for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){ if(dmn_rdr_lst_in[idx_rdr][0] == '-'){ dmn_rvr_rdr[idx_rdr]=True; /* Strip-out '-': Copy string to new memory one past negative sign to avoid losing byte */ optarg_lcl=dmn_rdr_lst_in[idx_rdr]; dmn_rdr_lst_in[idx_rdr]=(char *)strdup(optarg_lcl+1L); optarg_lcl=(char *)nco_free(optarg_lcl); }else{ dmn_rvr_rdr[idx_rdr]=False; } /* !'-' */ } /* !idx_rdr */ } /* !dmn_rdr_nbr_in */ /* Get number of variables, dimensions, and global attributes in file, file format */ (void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl); /* Create list of dimensions to average(ncwa)/re-order(ncpdq) */ if(IS_REORDER) (void)nco_dmn_avg_mk(in_id,dmn_rdr_lst_in,dmn_rdr_nbr_in,flg_dmn_prc_usr_spc,False,trv_tbl,&dmn_rdr_trv,&dmn_rdr_nbr_trv); /* Fill-in variable structure list for all extracted variables */ var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl); /* Duplicate to output array */ var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over variables */ /* Refresh var_out with dim_out data */ (void)nco_var_dmn_refresh(var_out,xtr_nbr); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for 
treating file */ cnv=nco_cnv_ini(in_id); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_map,nco_pck_plc,dmn_rdr_trv,dmn_rdr_nbr_trv,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl); /* Store processed and fixed variables info into GTT */ (void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl); /* We now have final list of variables to extract. Phew. */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Initialize, decode, and set PPC information */ if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl); /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Initialize chunking from user-specified inputs */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk); if(IS_REORDER){ dmn_sct **dmn_rdr=NULL; /* [sct] Dimension structures to be re-ordered */ /* "dmn_rdr" is only used for input to function nco_var_dmn_rdr_mtd(), that compares dimensions by short name; this is because the input list of -a are dimension short names; group support is obtained combining with -g option; on input it contains a list of dimension short names (in "dmn_rdr"), that together with input array "dmn_rvr_rdr" of flags that determine if dimension at index dmn_rvr_rdr[index] is to be reversed; use cases: in_grp_8.nc contains the dimensions /g1/lat, /g1/lon, /g2/lat, /g2/lon ncpdq -O -v lat,lon -a -lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc "dmn_rdr" contains names ["lat"], ["lon"], striped of 
'-' (minus) sign and dmn_rvr_rdr contains [True],[True ] output is reversed /g1/lat, /g1/lon, /g2/lat, /g2/lon ncpdq -O -v lat,lon -a lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc "dmn_rdr" contains names ["lat"], ["lon"], and dmn_rvr_rdr contains [False],[True ] output is reversed /g1/lon, /g2/lon */ /* Form list of re-ordering dimensions from extracted input dimensions */ dmn_rdr=(dmn_sct **)nco_malloc(dmn_rdr_nbr*sizeof(dmn_sct *)); /* Initialize re-ordering dimensions; initialize only short name */ for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){ dmn_rdr[idx_rdr]=(dmn_sct *)nco_malloc(sizeof(dmn_sct)); dmn_rdr[idx_rdr]->nm=(char *)strdup(dmn_rdr_lst_in[idx_rdr]); dmn_rdr[idx_rdr]->nm_fll=NULL; dmn_rdr[idx_rdr]->id=-1; } /* Determine and set new dimensionality in metadata of each re-ordered variable */ (void)nco_var_dmn_rdr_mtd_trv(trv_tbl,nbr_var_prc,var_prc,var_prc_out,nbr_var_fix,var_fix,dmn_rdr,dmn_rdr_nbr,dmn_rvr_rdr); for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_in; idx_rdr++){ dmn_rdr[idx_rdr]->nm=(char *)nco_free(dmn_rdr[idx_rdr]->nm); dmn_rdr[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr[idx_rdr]); } dmn_rdr=(dmn_sct **)nco_free(dmn_rdr); } /* IS_REORDER */ /* Alter metadata for variables that will be packed */ if(nco_pck_plc != nco_pck_plc_nil){ if(nco_pck_plc != nco_pck_plc_upk){ /* Allocate attribute list container for maximum number of entries */ aed_lst_add_fst=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct)); aed_lst_scl_fct=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct)); } /* endif packing */ for(idx=0;idx<nbr_var_prc;idx++){ nco_pck_mtd(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc); if(nco_pck_plc != nco_pck_plc_upk){ /* Use same copy of attribute name for all edits */ aed_lst_add_fst[idx].att_nm=add_fst_sng; aed_lst_scl_fct[idx].att_nm=scl_fct_sng; } /* endif packing */ } /* end loop over var_prc */ /* Transfer variable type to table. NB: Use processed variables set with new type. MUST be done before variable definition. 
*/ (void)nco_var_typ_trv(nbr_var_prc,var_prc_out,trv_tbl); } /* nco_pck_plc == nco_pck_plc_nil */ /* Define dimensions, extracted groups, variables, and attributes in output file. NB: record name is NULL */ (void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc,(char *)NULL,trv_tbl); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); /* Copy variable data for non-processed variables */ (void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl); /* Close first input netCDF file */ nco_close(in_id); /* Loop over input files (not currently used, fl_nbr == 1) */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in); if(nco_dbg_lvl >= nco_dbg_fl) 
(void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef _OPENMP #pragma omp parallel for private(idx,in_id) shared(aed_lst_add_fst,aed_lst_scl_fct,nco_dbg_lvl,dmn_rdr_nbr,gpe,in_id_arr,nbr_var_prc,nco_pck_map,nco_pck_plc,out_id,nco_prg_nm,rcd,var_prc,var_prc_out,nbr_dmn_fl,trv_tbl,IS_REORDER,fl_out_fmt) #endif /* !_OPENMP */ /* Process all variables in current file */ for(idx=0;idx<nbr_var_prc;idx++){ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ in_id=in_id_arr[omp_get_thread_num()]; var_prc[idx]->nc_id=in_id; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Retrieve variable from disk into memory */ (void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl); /* If re-ordering */ if(IS_REORDER){ if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* endif err */ /* Change dimensionionality of values */ (void)nco_var_dmn_rdr_val_trv(var_prc[idx],var_prc_out[idx],trv_tbl); /* Re-ordering required two value buffers, time to free() input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); } /* IS_REORDER */ /* Edit group name for output 
*/ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; if(nco_pck_plc != nco_pck_plc_nil){ /* Copy input variable buffer to processed variable buffer */ /* fxm: this is dangerous and leads to double free()'ing variable buffer */ var_prc_out[idx]->val=var_prc[idx]->val; /* (Un-)Pack variable according to packing specification */ nco_pck_val(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc,aed_lst_add_fst+idx,aed_lst_scl_fct+idx); } /* endif nco_pck_plc != nco_pck_plc_nil */ if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp); #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ { /* begin OpenMP critical */ /* Copy variable to output file then free value buffer */ if(var_prc_out[idx]->nbr_dim == 0){ (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); }else{ /* end if variable is scalar */ (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end if variable is array */ } /* end OpenMP critical */ /* Free current output buffer */ 
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); } /* end (OpenMP parallel for) loop over idx */ if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\n"); /* Write/overwrite packing attributes for newly packed and re-packed variables Logic here should nearly mimic logic in nco_var_dfn() */ if(nco_pck_plc != nco_pck_plc_nil && nco_pck_plc != nco_pck_plc_upk){ /* ...put file in define mode to allow metadata writing... */ (void)nco_redef(out_id); /* ...loop through all variables that may have been packed... */ for(idx=0;idx<nbr_var_prc;idx++){ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* nco_var_dfn() pre-defined dummy packing attributes in output file only for "packable" input variables */ if(nco_pck_plc_typ_get(nco_pck_map,var_prc[idx]->typ_upk,(nc_type *)NULL)){ /* Verify input variable was newly packed by this operator Writing pre-existing (non-re-packed) attributes here would fail because nco_pck_dsk_inq() never fills in var->scl_fct.vp and var->add_fst.vp Logic is same as in nco_var_dfn() (except var_prc[] instead of var[]) If operator newly packed this particular variable... */ if( /* ...either because operator newly packs all variables... 
*/ (nco_pck_plc == nco_pck_plc_all_new_att && nco_pck_map != nco_pck_map_dbl_flt && nco_pck_map != nco_pck_map_flt_dbl) || /* ...or because operator newly packs un-packed variables like this one... */ (nco_pck_plc == nco_pck_plc_all_xst_att && !var_prc[idx]->pck_ram) || /* ...or because operator re-packs packed variables like this one... */ (nco_pck_plc == nco_pck_plc_xst_new_att && var_prc[idx]->pck_ram) ){ /* Replace dummy packing attributes with final values, or delete them */ if(nco_dbg_lvl >= nco_dbg_io) (void)fprintf(stderr,"%s: main() replacing dummy packing attribute values for variable %s\n",nco_prg_nm,var_prc[idx]->nm); (void)nco_aed_prc(grp_out_id,aed_lst_add_fst[idx].id,aed_lst_add_fst[idx]); (void)nco_aed_prc(grp_out_id,aed_lst_scl_fct[idx].id,aed_lst_scl_fct[idx]); } /* endif variable is newly packed by this operator */ } /* !nco_pck_plc_alw */ } /* end loop over var_prc */ /* Take output file out of define mode */ if(hdr_pad == 0UL) (void)nco_enddef(out_id); else (void)nco__enddef(out_id,hdr_pad); } /* nco_pck_plc == nco_pck_plc_nil || nco_pck_plc == nco_pck_plc_upk */ /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); } /* end loop over fl_idx */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncpdq-specific memory cleanup */ if(dmn_rdr_nbr > 0){ if(dmn_rdr_nbr_in > 0) dmn_rdr_lst_in=nco_sng_lst_free(dmn_rdr_lst_in,dmn_rdr_nbr_in); dmn_rvr_rdr=(nco_bool *)nco_free(dmn_rvr_rdr); /* Free dimension list pointers */ for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_trv; idx_rdr++){ dmn_rdr_trv[idx_rdr]->nm=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm); dmn_rdr_trv[idx_rdr]->nm_fll=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm_fll); dmn_rdr_trv[idx_rdr]=(dmn_sct 
*)nco_free(dmn_rdr_trv[idx_rdr]); } dmn_rdr_trv=(dmn_sct **)nco_free(dmn_rdr_trv); /* Dimension structures in dmn_rdr are owned by dmn and dmn_out, free'd later */ } /* endif dmn_rdr_nbr > 0 */ if(nco_pck_plc != nco_pck_plc_nil){ if(nco_pck_plc_sng) nco_pck_plc_sng=(char *)nco_free(nco_pck_plc_sng); if(nco_pck_map_sng) nco_pck_map_sng=(char *)nco_free(nco_pck_map_sng); if(nco_pck_plc != nco_pck_plc_upk){ /* No need for loop over var_prc variables to free attribute values Variable structures and attribute edit lists share same attribute values Free them only once, and do it in nco_var_free() */ aed_lst_add_fst=(aed_sct *)nco_free(aed_lst_add_fst); aed_lst_scl_fct=(aed_sct *)nco_free(aed_lst_scl_fct); } /* nco_pck_plc == nco_pck_plc_upk */ } /* nco_pck_plc == nco_pck_plc_nil */ /* NCO-generic clean-up */ /* Free individual strings/arrays */ if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); /* Free lists of strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) 
cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr); if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr); var_prc=(var_sct **)nco_free(var_prc); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix=(var_sct **)nco_free(var_fix); var_fix_out=(var_sct **)nco_free(var_fix_out); trv_tbl_free(trv_tbl); for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm); if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne); if(gpe) gpe=(gpe_sct *)nco_gpe_free(gpe); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef RTABMAP_FLANN_NNINDEX_H #define RTABMAP_FLANN_NNINDEX_H #include <vector> #include "rtflann/general.h" #include "rtflann/util/matrix.h" #include "rtflann/util/params.h" #include "rtflann/util/result_set.h" #include "rtflann/util/dynamic_bitset.h" #include "rtflann/util/saving.h" namespace rtflann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * 
Builds the index */ virtual void buildIndex() { freeIndex(); cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds the index using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param index Index of point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } inline size_t removedCount() const { return removed_count_; } inline size_t sizeAtBuild() const { return size_at_build_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. 
* * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.h.data_type = flann_datatype_value<ElementType>::value; header.h.index_type = getType(); header.h.rows = size_; header.h.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strncmp(header.h.signature, FLANN_SIGNATURE_, strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.h.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different than of the one to be created."); } if (header.h.index_type != getType()) { throw FLANNException("Saved index type is different then the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] 
params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ /*int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; }*/ /** * @brief 
Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int 
knnSearch(const Matrix<ElementType>& queries,
          std::vector< std::vector<int> >& indices,
          std::vector<std::vector<DistanceType> >& dists,
          size_t knn,
          const SearchParams& params) const
{
    // Convenience wrapper: run the size_t overload, then copy the results
    // element-wise into the caller's int vectors.
    std::vector<std::vector<size_t> > indices_;
    int result = knnSearch(queries, indices_, dists, knn, params);

    indices.resize(indices_.size());
    for (size_t i=0;i<indices_.size();++i) {
        indices[i].assign(indices_[i].begin(), indices_[i].end());
    }
    return result;
}

/**
 * @brief Perform radius search
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found
 */
virtual int radiusSearch(const Matrix<ElementType>& queries,
                         Matrix<size_t>& indices,
                         Matrix<DistanceType>& dists,
                         float radius,
                         const SearchParams& params) const
{
    assert(queries.cols == veclen());
    int count = 0;
    size_t num_neighbors = std::min(indices.cols, dists.cols);
    // max_neighbors < 0 means "unbounded"; in either case clamp to the
    // room available in the output matrices.
    int max_neighbors = params.max_neighbors;
    if (max_neighbors<0) max_neighbors = num_neighbors;
    else max_neighbors = std::min(max_neighbors,(int)num_neighbors);

    if (max_neighbors==0) {
        // Caller only wants the neighbor count, not the neighbors themselves.
#pragma omp parallel num_threads(params.cores)
        {
            CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                count += resultSet.size();
            }
        }
    }
    else {
        // explicitly indicated to use unbounded radius result set
        // and we know there'll be enough room for resulting indices and dists
        if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
            {
                RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if (n>num_neighbors) n = num_neighbors;
                    resultSet.copy(indices[i], dists[i], n, params.sorted);

                    // mark the next element in the output buffers as unused
                    if (n<indices.cols) indices[i][n] = size_t(-1);
                    if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                    indices_to_ids(indices[i], indices[i], n);
                }
            }
        }
        else {
            // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
            {
                KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if ((int)n>max_neighbors) n = max_neighbors;
                    resultSet.copy(indices[i], dists[i], n, params.sorted);

                    // mark the next element in the output buffers as unused
                    if (n<indices.cols) indices[i][n] = size_t(-1);
                    if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                    indices_to_ids(indices[i], indices[i], n);
                }
            }
        }
    }
    return count;
}

/**
 *
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 * @param params
 * @return
 */
int radiusSearch(const Matrix<ElementType>& queries,
                 Matrix<int>& indices,
                 Matrix<DistanceType>& dists,
                 float radius,
                 const SearchParams& params) const
{
    // Convenience wrapper around the size_t overload; results are copied
    // into the caller's int matrix and the temporary buffer is released.
    rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
    int result = radiusSearch(queries, indices_, dists, radius, params);

    for (size_t i=0;i<indices.rows;++i) {
        for (size_t j=0;j<indices.cols;++j) {
            indices[i][j] = indices_[i][j];
        }
    }
    delete[] indices_.ptr();
    return result;
}

/**
 * @brief Perform radius search
 * @param[in] query The query point
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors
found
 */
virtual int radiusSearch(const Matrix<ElementType>& queries,
                         std::vector< std::vector<size_t> >& indices,
                         std::vector<std::vector<DistanceType> >& dists,
                         float radius,
                         const SearchParams& params) const
{
    assert(queries.cols == veclen());

    int count = 0;
    // just count neighbors
    if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
        {
            CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
            for (int i = 0; i < (int)queries.rows; i++) {
                resultSet.clear();
                findNeighbors(resultSet, queries[i], params);
                count += resultSet.size();
            }
        }
    }
    else {
        // Per-query output vectors are sized exactly to the number of
        // neighbors actually found.
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        if (params.max_neighbors<0) {  // search for all neighbors
#pragma omp parallel num_threads(params.cores)
            {
                RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        // Map internal indices to stable IDs (no-op unless
                        // points were removed).
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                }
            }
        }
        else {
            // number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
            {
                KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = resultSet.size();
                    count += n;
                    if ((int)n>params.max_neighbors) n = params.max_neighbors;
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                }
            }
        }
    }
    return count;
}

/**
 *
 * @param queries
 * @param indices
 * @param dists
 * @param radius
 *
@param params
 * @return
 */
int radiusSearch(const Matrix<ElementType>& queries,
                 std::vector< std::vector<int> >& indices,
                 std::vector<std::vector<DistanceType> >& dists,
                 float radius,
                 const SearchParams& params) const
{
    // Convenience wrapper: run the size_t overload and copy into int vectors.
    std::vector<std::vector<size_t> > indices_;
    int result = radiusSearch(queries, indices_, dists, radius, params);

    indices.resize(indices_.size());
    for (size_t i=0;i<indices_.size();++i) {
        indices[i].assign(indices_[i].begin(), indices_[i].end());
    }
    return result;
}

// Core search primitive implemented by each concrete index type.
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;

protected:

virtual void freeIndex() = 0;

virtual void buildIndexImpl() = 0;

// Translate an external point ID back to its internal index.
// Returns size_t(-1) if the ID is not present. When ids_ is empty
// (no removals ever happened) IDs and indices coincide.
size_t id_to_index(size_t id)
{
    if (ids_.size()==0) {
        return id;
    }
    size_t point_index = size_t(-1);
    if (id < ids_.size() && ids_[id]==id) {
        return id;
    }
    else {
        // binary search (ids_ is assigned in increasing order)
        size_t start = 0;
        size_t end = ids_.size();

        while (start<end) {
            size_t mid = (start+end)/2;
            if (ids_[mid]==id) {
                point_index = mid;
                break;
            }
            else if (ids_[mid]<id) {
                start = mid + 1;
            }
            else {
                end = mid;
            }
        }
    }
    return point_index;
}

// Rewrite internal indices as external IDs; in==out is allowed.
// Only needed once points have been removed; otherwise index==ID.
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
    if (removed_) {
        for (size_t i=0;i<size;++i) {
            out[i] = ids_[in[i]];
        }
    }
}

// Adopt "dataset" as the index's point set (row pointers only, the
// element data is not copied) and reset all removal bookkeeping.
void setDataset(const Matrix<ElementType>& dataset)
{
    size_ = dataset.rows;
    veclen_ = dataset.cols;
    last_id_ = 0;

    ids_.clear();
    removed_points_.clear();
    removed_ = false;
    removed_count_ = 0;

    points_.resize(size_);
    for (size_t i=0;i<size_;++i) {
        points_[i] = dataset[i];
    }
}

// Append new points to the point set, assigning fresh IDs when removal
// bookkeeping is active.
void extendDataset(const Matrix<ElementType>& new_points)
{
    size_t new_size = size_ + new_points.rows;
    if (removed_) {
        removed_points_.resize(new_size);
        ids_.resize(new_size);
    }
    points_.resize(new_size);
    for (size_t i=size_;i<new_size;++i) {
        points_[i] = new_points[i-size_];
        if (removed_) {
            ids_[i] = last_id_++;
            removed_points_.reset(i);
        }
    }
    size_ = new_size;
}

// Compact points_/ids_ in place by dropping entries flagged in
// removed_points_; removal bookkeeping is reset afterwards.
void cleanRemovedPoints()
{
    if (!removed_) return;

    size_t last_idx = 0;
    for (size_t i=0;i<size_;++i) {
        if (!removed_points_.test(i)) {
            points_[last_idx] = points_[i];
            ids_[last_idx] = ids_[i];
            removed_points_.reset(last_idx);
            ++last_idx;
        }
    }
    points_.resize(last_idx);
    ids_.resize(last_idx);
    removed_points_.resize(last_idx);
    size_ = last_idx;
    removed_count_ = 0;
}

// Member-wise swap of all index state with another NNIndex.
void swap(NNIndex& other)
{
    std::swap(distance_, other.distance_);
    std::swap(last_id_, other.last_id_);
    std::swap(size_, other.size_);
    std::swap(size_at_build_, other.size_at_build_);
    std::swap(veclen_, other.veclen_);
    std::swap(index_params_, other.index_params_);
    std::swap(removed_, other.removed_);
    std::swap(removed_points_, other.removed_points_);
    std::swap(removed_count_, other.removed_count_);
    std::swap(ids_, other.ids_);
    std::swap(points_, other.points_);
    std::swap(data_ptr_, other.data_ptr_);
}

protected:

/**
 * The distance functor
 */
Distance distance_;

/**
 * Each index point has an associated ID. IDs are assigned sequentially in
 * increasing order. This indicates the ID assigned to the last point added to the
 * index.
 */
size_t last_id_;

/**
 * Number of points in the index (and database)
 */
size_t size_;

/**
 * Number of features in the dataset when the index was last built.
 */
size_t size_at_build_;

/**
 * Size of one point in the index (and database)
 */
size_t veclen_;

/**
 * Parameters of the index.
*/
IndexParams index_params_;

/**
 * Flag indicating if at least a point was removed from the index
 */
bool removed_;

/**
 * Array used to mark points removed from the index
 */
DynamicBitset removed_points_;

/**
 * Number of points removed from the index
 */
size_t removed_count_;

/**
 * Array of point IDs, returned by nearest-neighbour operations
 */
std::vector<size_t> ids_;

/**
 * Point data
 */
std::vector<ElementType*> points_;

/**
 * Pointer to dataset memory if allocated by this index, otherwise NULL
 */
ElementType* data_ptr_;

};

// Convenience macro pulling the protected NNIndex members and helpers
// into the scope of a derived index class template.
#define USING_BASECLASS_SYMBOLS \
    using NNIndex<Distance>::distance_;\
    using NNIndex<Distance>::size_;\
    using NNIndex<Distance>::size_at_build_;\
    using NNIndex<Distance>::veclen_;\
    using NNIndex<Distance>::index_params_;\
    using NNIndex<Distance>::removed_points_;\
    using NNIndex<Distance>::ids_;\
    using NNIndex<Distance>::removed_;\
    using NNIndex<Distance>::points_;\
    using NNIndex<Distance>::extendDataset;\
    using NNIndex<Distance>::setDataset;\
    using NNIndex<Distance>::cleanRemovedPoints;\
    using NNIndex<Distance>::indices_to_ids;

}

#endif //FLANN_NNINDEX_H
ellpack.c
#include <errno.h> #include <stdio.h> #include "ellpack.h" void print_ellpack_matrix(ellpack_matrix_t ellpack, int num_rows, int width) { printf("\nELLPACK matrix\n"); printf("data\n"); for (int i = 0; i < num_rows; i++) { for (int j = 0; j < width; j++) { if (ellpack.data[i*width+j] == ELLPACK_SENTINEL_VALUE) printf(" * "); else printf(" %.3lf", ellpack.data[i*width+j]); } printf("\n"); } printf("\ndata\n"); for (int i = 0; i < num_rows; i++) { for (int j = 0; j < width; j++) { if (ellpack.indices[i*width+j] == ELLPACK_SENTINEL_VALUE) printf("* "); else printf("%d ", ellpack.indices[i*width+j]); } printf("\n"); } } void init_ellpack(ellpack_matrix_t *ellpack, int num_elems) { ellpack->data = malloc(num_elems*sizeof(double)); ellpack->indices = malloc(num_elems*sizeof(int)); } // `ellpack_matrix_from_matrix_market()` converts a matrix in the // coordinate (COO) format, that is used in the Matrix Market file // format, to a sparse matrix in the ELLPACK storage format. int ellpack_matrix_from_matrix_market(ellpack_matrix_t *ellpack, const matrix_market_t *mm, const matrix_info_t mi) { if (mi.max_nonzeros_per_row > mi.num_columns) return EINVAL; size_t width = mi.max_nonzeros_per_row; double *data = malloc(mi.num_rows*width*sizeof(double *)); int *indices = malloc(mi.num_rows*width*sizeof(int *)); // Preset both indices and data to the sentinel values. #pragma omp parallel for for (int i = 0; i < mi.num_rows; i++) for (int j = 0; j < mi.max_nonzeros_per_row; j++) { indices[i*width + j] = ELLPACK_SENTINEL_INDEX; data[i*width + j] = ELLPACK_SENTINEL_VALUE; } #pragma omp for for (int i = 0; i < mi.num_nonzeros; i++) { size_t row = mm->row_indices[i]; // Find the first column not used, i.e., the first column containing a // sentinel value. 
int col = 0; while (indices[row*width+col] != ELLPACK_SENTINEL_INDEX && col < width) col++; indices[row*width+col] = mm->column_indices[i]; data[row*width+col] = mm->values[i]; } ellpack->data = data; ellpack->indices = indices; return 0; } void transpose_ellpack(ellpack_matrix_t *in, ellpack_matrix_t *out, const matrix_info_t mi) { for (int i = 0; i < mi.num_rows; i++) { for (int j = 0; j < mi.max_nonzeros_per_row; j++) { out->data[j*mi.num_rows + i] = in->data[i*mi.max_nonzeros_per_row + j]; out->indices[j*mi.num_rows + i] = in->indices[i*mi.max_nonzeros_per_row + j]; } } } void tiled_transpose_ellpack(ellpack_matrix_t *in, ellpack_matrix_t *out, const matrix_info_t mi, int tile_size) { // Take an M x N matrix and transpose submatrices of dimensions T x N to // submatrices of dimenions N x T, resulting in a matrix of dimenions // (M*N/T)xT // // Example: // // N=3 T=2 // +---+---+---+ +---+---+ // | a | b | c | | a | d | // +---+---+---+ +---+---+ // | d | e | f | T=2 | b | e | // M=4 +---+---+---+ ---> M*N/T=6 +---+---+ // | g | h | i | | c | f | // +---+---+---+ +---+---+ // | j | k | l | | g | j | // +---+---+---+ +---+---+ // | h | k | // +---+---+ // | i | l | // +---+---+ // // For this to work, the tile size has to divide the number of rows evenly. const int n = mi.max_nonzeros_per_row; for (int i = 0; i < mi.num_rows; i++) { for (int j = 0; j < mi.max_nonzeros_per_row; j++) { size_t out_row = j + n*(i/tile_size); size_t out_col = i % tile_size; out->data [out_row*tile_size + out_col] = in->data [i*n+j]; out->indices[out_row*tile_size + out_col] = in->indices[i*n+j]; } } }
GB_unop__identity_uint8_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_uint8_fc64)
// op(A') function: GB (_unop_tran__identity_uint8_fc64)

// C type:  uint8_t
// A type:  GxB_FC64_t
// cast:    uint8_t cij = GB_cast_to_uint8_t (creal (aij))
// unaryop: cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = GB_cast_to_uint8_t (creal (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint8_fc64)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // non-bitmap case: apply the operator to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
FasterGossipCommMulti.h
/* Copyright 2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #pragma once #include <hwloc.h> #include <hwloc/cudart.h> #include <omp.h> #include <ucp/api/ucp.h> #include <algorithm> #include "FasterGossipCommMultiTraits.h" #include "mpi.h" #define WARM_UP_ROUND 2 namespace HugeCTR { namespace GossipComm { // The empty call back function for UCP communication API inline void empty_send_callback_func(void *request, ucs_status_t status) {} inline void empty_recv_callback_func(void *request, ucs_status_t status, ucp_tag_recv_info_t *info) {} template <typename data_t_> class FasterGossipCommMulti : public FasterComm { public: using GossipMultiCommTraits = FasterGossipCommMultiAll2AllTraits<data_t_>; using FasterGossipComm = typename GossipMultiCommTraits::FasterGossipComm; using gpu_id_t = typename GossipMultiCommTraits::gpu_id_t; // Ctor FasterGossipCommMulti(const std::string &plan_file, const std::vector<gpu_id_t> &GPU_list, const int num_proc, const int rank, MPI_Comm comm) : GPU_list_(GPU_list), rank_(rank), num_proc_(num_proc), comm_(comm), GossipCommHandle_(num_proc_), local_buffer_(GPU_list_.size()), recv_buffer_(GPU_list_.size()), temp_buf_(GPU_list_.size()), temp_table_(GPU_list_.size(), std::vector<size_t>(GPU_list_.size())), temp_src_(GPU_list_.size()), temp_dst_(GPU_list_.size()), affinity_list_(num_proc_), send_reqs_(GPU_list_.size(), nullptr), recv_reqs_(GPU_list_.size(), nullptr) { // Do some check assert((num_proc_ > 0) && "The number of process is not greater than 0!\n"); assert((rank_ >= 0) && (rank_ < num_proc_) && "The rank of this process is not valid!\n"); // Local and total GPU count num_local_gpu_ = GPU_list_.size(); num_total_gpu_ 
= num_proc_ * num_local_gpu_; assert((num_local_gpu_ > 0) && "The number of local GPUs is not valid!\n"); // Create MPI_Request buffer // request_ = (MPI_Request* )malloc(2 * num_local_gpu_ * sizeof(MPI_Request)); // Construct the local gossip all2all library for (int stage = 0; stage < num_proc_; stage++) { GossipCommHandle_[stage] = new FasterGossipComm(plan_file, GPU_list_); } // HWLOC variable setup hwloc_topology_init(&topo_); hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL); hwloc_topology_load(topo_); hwloc_cpuset_t ori_cpu_set; hwloc_cpuset_t cpu_set; ori_cpu_set = hwloc_bitmap_alloc(); cpu_set = hwloc_bitmap_alloc(); // Get the original thread binding for recovery hwloc_get_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD); // Get the number of CPU sockets and resize the UCP vector socket_num_ = hwloc_get_nbobjs_by_type(topo_, HWLOC_OBJ_PACKAGE); assert((socket_num_ > 0) && "The number of CPU sockets is not valid!\n"); // Temp variable used to initialize UCP environment ucp_params_t ucp_params; ucp_config_t *ucp_config; ucp_worker_params_t ucp_worker_params; size_t ucp_worker_address_len; std::vector<ucp_ep_params_t> ucp_ep_params(socket_num_ * num_proc_); ucp_context_.resize(socket_num_); ucp_worker_.resize(socket_num_); ucp_worker_address_.resize(socket_num_); ucp_worker_address_book_.resize(socket_num_ * num_proc_); ucp_endpoints_.resize(socket_num_, std::vector<ucp_ep_h>(socket_num_ * num_proc_)); // Initialize UCP Env on different CPU sockets for (int i = 0; i < socket_num_; i++) { // Bind the current thread to run on target CPU socket hwloc_obj_t current_socket = hwloc_get_obj_by_type(topo_, HWLOC_OBJ_PACKAGE, i); hwloc_set_cpubind(topo_, current_socket->cpuset, HWLOC_CPUBIND_THREAD); // Test the place where the current thread is running hwloc_get_last_cpu_location(topo_, cpu_set, HWLOC_CPUBIND_THREAD); char *cpu_string; hwloc_bitmap_asprintf(&cpu_string, cpu_set); printf("On rank %d, the cpu set that current thread is running on 
is : %s.\n", rank_, cpu_string); free(cpu_string); // Initialize UCP context memset(&ucp_params, 0, sizeof(ucp_params)); ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_ESTIMATED_NUM_EPS; ucp_params.features = UCP_FEATURE_TAG; ucp_params.estimated_num_eps = socket_num_ * num_proc_; ucp_config_read(NULL, NULL, &ucp_config); ucp_init(&ucp_params, ucp_config, &ucp_context_[i]); ucp_config_release(ucp_config); // Initialize UCP worker memset(&ucp_worker_params, 0, sizeof(ucp_worker_params)); ucp_worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; ucp_worker_params.thread_mode = UCS_THREAD_MODE_SINGLE; // only single thread can access this // worker at one time, i.e. no thread // safety. ucp_worker_create(ucp_context_[i], &ucp_worker_params, &ucp_worker_[i]); // Get address for local worker ucp_worker_get_address(ucp_worker_[i], &ucp_worker_address_[i], &ucp_worker_address_len); } // Recover the CPU binding of current thread hwloc_set_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD); // Create EPs for local worker // Allocate address for all(local and remote) workers for (auto &iaddress : ucp_worker_address_book_) { iaddress = (ucp_address_t *)malloc(ucp_worker_address_len); } // Copy local worker address to address table for (int i = 0; i < socket_num_; i++) { memcpy(ucp_worker_address_book_[rank_ * socket_num_ + i], ucp_worker_address_[i], ucp_worker_address_len); } // Using MPI to broadcast address from all ranks to all ranks(all broadcast) for (int iroot = 0; iroot < num_proc_; iroot++) { for (int i = 0; i < socket_num_; i++) { MPI_Bcast(ucp_worker_address_book_[iroot * socket_num_ + i], ucp_worker_address_len, MPI_BYTE, iroot, comm_); } } // Create EPs on local worker to other workers(include itself) for (int socket = 0; socket < socket_num_; socket++) { for (int i = 0; i < socket_num_ * num_proc_; i++) { // Only need to set once if (socket == 0) { memset(&ucp_ep_params[i], 0, sizeof(ucp_ep_params[i])); ucp_ep_params[i].field_mask = 
UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ucp_ep_params[i].address = ucp_worker_address_book_[i]; } ucp_ep_create(ucp_worker_[socket], &ucp_ep_params[i], &ucp_endpoints_[socket][i]); } } // Allocate affinity list for all GPUs on all nodes for (int i = 0; i < num_proc_; i++) { affinity_list_[i] = (gpu_id_t *)malloc(num_local_gpu_ * sizeof(*affinity_list_[i])); } // Assign each local T-GPU to the local L-socket for (int i = 0; i < num_local_gpu_; i++) { // Find the affinity CPU set that current topo GPU is binding to hwloc_cudart_get_device_cpuset(topo_, GPU_list_[i], cpu_set); hwloc_obj_t affinity_socket = hwloc_get_next_obj_covering_cpuset_by_type(topo_, cpu_set, HWLOC_OBJ_PACKAGE, NULL); affinity_list_[rank_][i] = (gpu_id_t)(affinity_socket->logical_index); } // Using MPI to broadcast GPU locality info to all other ranks for (int iroot = 0; iroot < num_proc_; iroot++) { MPI_Bcast(affinity_list_[iroot], num_local_gpu_ * sizeof(*affinity_list_[iroot]), MPI_BYTE, iroot, comm_); } hwloc_bitmap_free(ori_cpu_set); hwloc_bitmap_free(cpu_set); } // Dtor ~FasterGossipCommMulti() { // free(request_); for (int stage = 0; stage < num_proc_; stage++) { delete GossipCommHandle_[stage]; } // Release UCP EPs for (int socket = 0; socket < socket_num_; socket++) { for (int irank = 0; irank < socket_num_ * num_proc_; irank++) { // Flush all operations associated with the EP and release the EP ucs_status_ptr_t ucs_status_ptr = ucp_ep_close_nb(ucp_endpoints_[socket][irank], UCP_EP_CLOSE_MODE_FLUSH); if (UCS_PTR_IS_ERR(ucs_status_ptr) || UCS_PTR_STATUS(ucs_status_ptr) == UCS_OK) { continue; } // While the releasing is not finished, progress the worker while (ucp_request_check_status(ucs_status_ptr) == UCS_INPROGRESS) { for (int j = 0; j < socket_num_; j++) { ucp_worker_progress(ucp_worker_[j]); } } // Free the request ucp_request_free(ucs_status_ptr); } } // Wait for all ranks to release EPs before releasing any worker MPI_Barrier(comm_); // Release worker address for (int i = 0; i < 
socket_num_; i++) { ucp_worker_release_address(ucp_worker_[i], ucp_worker_address_[i]); } // Release worker for (int i = 0; i < socket_num_; i++) { ucp_worker_destroy(ucp_worker_[i]); } // Release UCP context for (int i = 0; i < socket_num_; i++) { ucp_cleanup(ucp_context_[i]); } // Free address book for (auto &iaddress : ucp_worker_address_book_) { free(iaddress); } // Free HWLOC topology hwloc_topology_destroy(topo_); // Free GPU affinity list for (int i = 0; i < num_proc_; i++) { free(affinity_list_[i]); } } // Initialize a communication void Initialize(const std::vector<data_t_ *> &src, const std::vector<data_t_ *> &dst, const std::vector<std::vector<size_t>> &send_table, const std::vector<std::vector<size_t>> &recv_table) { // Device restorer CudaDeviceContext context; // record user provide data src_ = src; dst_ = dst; send_table_ = send_table; recv_table_ = recv_table; // Calculate the size of Local buffers and Recv buffers, and allocate on each local GPU for (int i = 0; i < num_local_gpu_; i++) { size_t max_size = 0; for (int j = 0; j < num_proc_; j++) { if (j != rank_) { size_t accum_size = 0; for (int k = 0; k < num_local_gpu_; k++) { accum_size += recv_table_[k][i + j * num_local_gpu_]; } max_size = std::max(max_size, accum_size); } } // Allocate buffers on current topo GPU context.set_device(GPU_list_[i]); CK_CUDA_THROW_(cudaMalloc(&local_buffer_[i], sizeof(data_t_) * max_size)); CK_CUDA_THROW_(cudaMalloc(&recv_buffer_[i], sizeof(data_t_) * max_size)); } // Max buffer size required by gossip all2all on each GPU std::vector<size_t> max_temp_buf_size(num_local_gpu_, 0); // Initialize all gossip all2all object for (int stage = 0; stage < num_proc_; stage++) { // for first stage, do all2all on local data if (stage == 0) { // Extract the temp table for local all2all on this stage for (int i = 0; i < num_local_gpu_; i++) { for (int j = 0; j < num_local_gpu_; j++) { temp_table_[i][j] = recv_table_[j][rank_ * num_local_gpu_ + i]; } } // Extract the temp src and 
dst buffers for local all2all on this stage for (int i = 0; i < num_local_gpu_; i++) { size_t src_offset = 0; size_t dst_offset = 0; for (int j = 0; j < num_local_gpu_ * rank_; j++) { src_offset += send_table_[i][j]; dst_offset += recv_table_[i][j]; } temp_src_[i] = src_[i] + src_offset; temp_dst_[i] = dst_[i] + dst_offset; } // Initialize the local all2all std::vector<size_t> temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(temp_src_, temp_dst_, temp_table_); // Find the largest buffer size needed on each GPU for (int i = 0; i < num_local_gpu_; i++) { max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]); } } // for later stage, do all2all with data received from previous stage else { // previous stage src node int prev_src_node = (rank_ + num_proc_ - stage) % num_proc_; // Extract the temp table for local all2all on this stage for (int i = 0; i < num_local_gpu_; i++) { for (int j = 0; j < num_local_gpu_; j++) { temp_table_[i][j] = recv_table_[j][prev_src_node * num_local_gpu_ + i]; } } // Extract the temp dst buffers for local all2all on this stage for (int i = 0; i < num_local_gpu_; i++) { size_t dst_offset = 0; for (int j = 0; j < num_local_gpu_ * prev_src_node; j++) { dst_offset += recv_table_[i][j]; } temp_dst_[i] = dst_[i] + dst_offset; } std::vector<size_t> temp_buf_size; // Initialize the local all2all if (stage % 2 == 0) { temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(local_buffer_, temp_dst_, temp_table_); } else { temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(recv_buffer_, temp_dst_, temp_table_); } // Find the largest buffer size needed on each GPU for (int i = 0; i < num_local_gpu_; i++) { max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]); } } } // Allocate max size temp buffers shared by all gossip all2all for (int i = 0; i < num_local_gpu_; i++) { // Allocate temp buffers on each GPU context.set_device(GPU_list_[i]); CK_CUDA_THROW_(cudaMalloc(&temp_buf_[i], 
sizeof(data_t_) * max_temp_buf_size[i])); } // Set the allocated temp buffers to all gossip all2all for (int stage = 0; stage < num_proc_; stage++) { GossipCommHandle_[stage]->set_buf(temp_buf_); } // Run exec() in advance to warm up all buffers used by UCX // For even nodes, 1 run is enough for warm up, for odd nodes, 2 runs is needed for (int i = 0; i < WARM_UP_ROUND; i++) { exec(); } } void exec() { // loop through all stages for (int stage = 0; stage < num_proc_; stage++) { // We cuse 2 threads, one for UCX P2P, one for gossip all2all. In the same stage, these 2 operations // can be executed concurrently #pragma omp parallel default(none) shared(stage, num_proc_, rank_, num_local_gpu_, send_table_, \ affinity_list_, send_reqs_, ucp_endpoints_, socket_num_, \ src_, recv_table_, recv_reqs_, ucp_worker_, \ recv_buffer_, GossipCommHandle_) num_threads(2) { // Each thread grab its ID within this OpenMP thread team int thread_id = omp_get_thread_num(); // Thread 0 do the gossip all2all if (thread_id == 0) { // do local all2all // Execute the local all2all GossipCommHandle_[stage]->exec(); } // Thread 1 do the UCX P2P else { // for all stage except last stage, send and receive data to/from other nodes if (stage < num_proc_ - 1) { // The dst and src rank of local node in this stage int dst_rank = (rank_ + stage + 1) % num_proc_; int src_rank = (rank_ + num_proc_ - stage - 1) % num_proc_; // loop through all local GPUs to send GPU buffers to dst worker for (int i = 0; i < num_local_gpu_; i++) { size_t src_offset = 0; size_t src_len = 0; // Accumulate the offset within the src_buffer for (int j = 0; j < num_local_gpu_ * dst_rank; j++) { src_offset += send_table_[i][j]; } // Accumulate the amount of elements to send to the target node for (int j = 0; j < num_local_gpu_; j++) { src_len += send_table_[i][j + num_local_gpu_ * dst_rank]; } // MPI_Isend(src_[i] + src_offset, sizeof(data_t_) * src_len, MPI_BYTE, dst_rank, i, // comm_, request_ + i); // Prepare the tag for 
tag-matching massage passing, the tag should identify the user // tag, source worker of the tag and other info ucp_tag_t comm_tag = 0LLU; // MSB 32-bit for original MPI TAG comm_tag |= ((ucp_tag_t)i << 32); // 16-32 bits are source rank comm_tag |= ((ucp_tag_t)(rank_ & 0x0000FFFF) << 16); // The 0-15 bits are source L-socket(worker) comm_tag |= (((ucp_tag_t)(affinity_list_[rank_][i])) & 0x000000000000FFFF); send_reqs_[i] = ucp_tag_send_nb( ucp_endpoints_[affinity_list_[rank_][i]] [dst_rank * socket_num_ + affinity_list_[dst_rank][i]], src_[i] + src_offset, sizeof(data_t_) * src_len, ucp_dt_make_contig(sizeof(char)), comm_tag, empty_send_callback_func); // If the returned request is not a valid pointer, that means that the operation // already finished(failed or completed), the callback will not been // called in these situation and the returned request is not de-referencable thus no // release needed. if (UCS_PTR_IS_ERR(send_reqs_[i]) || UCS_PTR_STATUS(send_reqs_[i]) == UCS_OK) { send_reqs_[i] = nullptr; } } // loop through all local GPUs to receive GPU buffers from src worker for (int i = 0; i < num_local_gpu_; i++) { size_t dst_len = 0; // Accumulate the amount of elements to receive from the source node for (int j = 0; j < num_local_gpu_; j++) { dst_len += recv_table_[j][i + src_rank * num_local_gpu_]; } // MPI_Irecv(recv_buffer_[i], sizeof(data_t_) * dst_len, MPI_BYTE, src_rank, i, comm_, // request_ + num_local_gpu_ +i); // Prepare the tag for tag-matching massage passing, the tag should identify the user // tag, source worker of the tag and other info ucp_tag_t comm_tag = 0LLU; // MSB 32-bit for original MPI TAG comm_tag |= ((ucp_tag_t)i << 32); // 16-32 bits are source rank comm_tag |= ((ucp_tag_t)(src_rank & 0x0000FFFF) << 16); // The 0-15 bits are source L-socket(worker) comm_tag |= (((ucp_tag_t)(affinity_list_[src_rank][i])) & 0x000000000000FFFF); recv_reqs_[i] = ucp_tag_recv_nb(ucp_worker_[affinity_list_[rank_][i]], recv_buffer_[i], sizeof(data_t_) * 
dst_len, ucp_dt_make_contig(sizeof(char)), comm_tag, (ucp_tag_t)-1, empty_recv_callback_func); // The same as send, but recv API never return UCS_OK, only UCS_ERR_xx or valid // pointer can be returned if (UCS_PTR_IS_ERR(recv_reqs_[i])) { recv_reqs_[i] = nullptr; } } } // for all stage except last stage, wait for UCX communication to finish if (stage < num_proc_ - 1) { // Wait for all send to finish for (int i = 0; i < num_local_gpu_; i++) { // If the current operation is not completed yet, progress it while (send_reqs_[i] != nullptr && ucp_request_check_status(send_reqs_[i]) == UCS_INPROGRESS) { for (int j = 0; j < socket_num_; j++) { ucp_worker_progress(ucp_worker_[j]); } } } // Wait for all receive to finish for (int i = 0; i < num_local_gpu_; i++) { // If the current operation is not completed yet, progress it while (recv_reqs_[i] != nullptr && ucp_request_check_status(recv_reqs_[i]) == UCS_INPROGRESS) { for (int j = 0; j < socket_num_; j++) { ucp_worker_progress(ucp_worker_[j]); } } } // Da-allocate UCP request before going to next round for (int i = 0; i < num_local_gpu_; i++) { if (send_reqs_[i] != nullptr) { ucp_request_free(send_reqs_[i]); send_reqs_[i] = nullptr; } if (recv_reqs_[i] != nullptr) { ucp_request_free(recv_reqs_[i]); recv_reqs_[i] = nullptr; } } // MPI_Waitall(2 * num_local_gpu_, request_, MPI_STATUSES_IGNORE); } } } // Swap recv_buffer and local_buffer pointer. 
If there is odd nodes, do not swap in the last // stage if (num_proc_ % 2 != 0 && stage == num_proc_ - 1) { continue; } recv_buffer_.swap(local_buffer_); } // stage loop } void reset() { // Device restorer CudaDeviceContext context; // Free local_buffer and recv_buffer, ready for next multi-node all2all for (int i = 0; i < num_local_gpu_; i++) { // Free temp buffers on each GPU context.set_device(GPU_list_[i]); CK_CUDA_THROW_(cudaFree(local_buffer_[i])); CK_CUDA_THROW_(cudaFree(recv_buffer_[i])); } // Free gossip all2all temp buffers for (int i = 0; i < num_local_gpu_; i++) { context.set_device(GPU_list_[i]); CK_CUDA_THROW_(cudaFree(temp_buf_[i])); } } private: // GPU list std::vector<gpu_id_t> GPU_list_; // GPU count gpu_id_t num_local_gpu_; gpu_id_t num_total_gpu_; // MPI-related resource int rank_; int num_proc_; MPI_Comm comm_; // MPI_Request * request_; // Local gossip all2all library std::vector<FasterGossipComm *> GossipCommHandle_; // Temp local GPU buffers for remote data std::vector<data_t_ *> local_buffer_; std::vector<data_t_ *> recv_buffer_; // Temp local GPU buffers for local all2all std::vector<data_t_ *> temp_buf_; // Buffers and tables provided by users std::vector<data_t_ *> src_; std::vector<data_t_ *> dst_; std::vector<std::vector<size_t>> send_table_; std::vector<std::vector<size_t>> recv_table_; // Temp table for local all2all std::vector<std::vector<size_t>> temp_table_; // Temp src and dst pinter vector for local all2all std::vector<data_t_ *> temp_src_; std::vector<data_t_ *> temp_dst_; // Socket count int socket_num_; // UCP variable: UCP context, UCP worker, UCP address, UCP EP and UCP request std::vector<ucp_context_h> ucp_context_; std::vector<ucp_worker_h> ucp_worker_; std::vector<ucp_address_t *> ucp_worker_address_; std::vector<ucp_address_t *> ucp_worker_address_book_; std::vector<std::vector<ucp_ep_h>> ucp_endpoints_; std::vector<ucs_status_ptr_t> send_reqs_; std::vector<ucs_status_ptr_t> recv_reqs_; // HWLOC variable: topo 
hwloc_topology_t topo_; // The buffers that record the locality of each GPU in GPU list on each nodes std::vector<gpu_id_t *> affinity_list_; }; // class } // namespace }
GB_unaryop__identity_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_fp64
// op(A') function:  GB_tran__identity_int32_fp64

// C type:   int32_t
// A type:   double
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (double -> int32_t, saturating/checked via GB_CAST_SIGNED)
#define GB_CASTING(z, x) \
    int32_t z ; GB_CAST_SIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (x, aij) ;                   \
    GB_OP (GB_CX (pC), x) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each of the anz entries of Ax (double) to int32_t and store it in Cx.
// Entries are independent, so the loop is a flat, statically scheduled
// parallel map over the entry arrays.
GrB_Info GB_unop__identity_int32_fp64
(
    int32_t *restrict Cx,       // output entry array, anz entries
    const double *restrict Ax,  // input entry array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // kernel compiled out; caller falls back to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (int32_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands in terms of the GB_* macros defined above.
GrB_Info GB_tran__identity_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__ainv_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_int8 // op(A') function: GB_tran__ainv_bool_int8 // C type: bool // A type: int8_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_int8 ( bool *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ocp_nlp_sqp_rti.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "acados/ocp_nlp/ocp_nlp_sqp_rti.h" // external #include <assert.h> #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/sim/sim_common.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" /************************************************ * options ************************************************/ int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int size = 0; size += sizeof(ocp_nlp_sqp_rti_opts); size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); size += config->regularize->opts_calculate_size(); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]); } return size; } void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; 
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_opts); opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); opts->regularize = config->regularize->opts_assign(c_ptr); c_ptr += config->regularize->opts_calculate_size(); // dynamics opts->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr); c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]); } // cost opts->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr); c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]); } // constraints opts->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { opts->constraints[ii] = constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr); c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]); } assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; 
ocp_nlp_reg_config *regularize = config->regularize; int ii; int N = dims->N; // SQP RTI opts // opts->compute_dual_sol = 1; opts->reuse_workspace = 1; #if defined(ACADOS_WITH_OPENMP) opts->num_threads = ACADOS_NUM_THREADS; #endif opts->ext_qp_res = 0; // submodules opts // do not compute adjoint in dynamics and constraints int compute_adj = 0; // qp solver qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize); // dynamics for (ii = 0; ii < N; ii++) { dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); dynamics[ii]->opts_set(dynamics[ii], opts->dynamics[ii], "compute_adj", &compute_adj); } // cost for (ii = 0; ii <= N; ii++) { cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]); constraints[ii]->opts_set(constraints[ii], opts->constraints[ii], "compute_adj", &compute_adj); } return; } void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int ii; int N = dims->N; qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], 
opts->constraints[ii]); } return; } void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_; ocp_nlp_config *config = config_; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if(char_!=NULL) { module_length = char_-field; for(ii=0; ii<module_length; ii++) module[ii] = field[ii]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if(!strcmp(ptr_module, "qp")) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value); if(!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "num_threads")) { int* num_threads = (int *) value; opts->num_threads = *num_threads; } else if (!strcmp(field, "exact_hess")) { int N = config->N; // cost for (ii=0; ii<=N; ii++) config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value); // dynamics for (ii=0; ii<N; ii++) config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value); // // constraints TODO disabled for now as prevents convergence !!! 
// for (ii=0; ii<=N; ii++) // config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value); } else if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else { printf("\nerror: ocp_nlp_sqp_rti_opts_set: wrong field: %s\n", field); exit(1); } } return; } void ocp_nlp_sqp_rti_dynamics_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_dynamics_config *dyn_config = config->dynamics[stage]; dyn_config->opts_set(dyn_config, opts->dynamics[stage], field, value); return; } void ocp_nlp_sqp_rti_cost_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_cost_config *cost_config = config->cost[stage]; cost_config->opts_set(cost_config, opts->cost[stage], field, value); return; } void ocp_nlp_sqp_rti_constraints_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_constraints_config *constraints_config = config->constraints[stage]; constraints_config->opts_set(constraints_config, opts->constraints[stage], (char *) field, value); return; } /************************************************ * memory ************************************************/ int ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; int size = 0; size += 
sizeof(ocp_nlp_sqp_rti_memory); size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // nlp mem size += ocp_nlp_memory_calculate_size(config, dims); // stat int stat_m = 1+1; int stat_n = 2; if(opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 8; // initial align // make_int_multiple_of(64, &size); return size; } void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_memory); // QP solver mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr); c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization mem->regularize_mem = 
config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr); c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims); // dynamics mem->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr); c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost mem->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr); c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints mem->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->constraints[ii] = constraints[ii]->memory_assign( constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr); c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // stat mem->stat = (double *) c_ptr; mem->stat_m = 1+1; mem->stat_n = 2; if(opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = 
config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // loop index int ii; // extract dims int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *nz = dims->nz; int size = 0; int size_tmp = 0; int tmp; // sqp size += sizeof(ocp_nlp_sqp_rti_work); // array of pointers // cost size += (N + 1) * sizeof(void *); // dynamics size += N * sizeof(void *); // constraints size += (N + 1) * sizeof(void *); // qp in size += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver); // qp out size += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver); if(opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver); } if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } #else // qp solver tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (ii = 0; ii < N; ii++) { tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (ii = 0; ii <= N; ii++) { tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp; } // constraints for (ii = 0; ii <= N; ii++) { tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } size += size_tmp; #endif } else { // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } // dzduxt size += (N+1)*sizeof(struct blasfeo_dmat); for(ii=0; ii<=N; ii++) size += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // z_alg size += (N+1)*sizeof(struct blasfeo_dvec); for(ii=0; ii<=N; ii++) size += blasfeo_memsize_dvec(nz[ii]); size += 1*8; // blasfeo_str align size += 1*64; // blasfeo_mem align return size; } // TODO(all): introduce member "memsize" in all structures to make on-line cast cheaper (i.e. 
avoid // to calculate size on-line) static void ocp_nlp_sqp_rti_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_work *work, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_opts *opts) { ocp_nlp_config *config = (ocp_nlp_config *) config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // loop index int ii; // extract dims int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *nz = dims->nz; // sqp char *c_ptr = (char *) work; c_ptr += sizeof(ocp_nlp_sqp_rti_work); // array of pointers // work->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); // work->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // work->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // qp in work->qp_in = ocp_qp_in_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver); // qp out work->qp_out = ocp_qp_out_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver); if(opts->ext_qp_res) { // qp res work->qp_res = ocp_qp_res_assign(dims->qp_solver, c_ptr); c_ptr += ocp_qp_res_calculate_size(dims->qp_solver); // qp res ws work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver, c_ptr); c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver); } if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], 
opts->cost[ii]); } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } #else int size_tmp = 0; int tmp; // qp solver work->qp_work = (void *) c_ptr; tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp; } c_ptr += size_tmp; #endif } else { // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } // blasfeo_str align align_char_to(8, &c_ptr); // dzduxt work->dzduxt = (struct blasfeo_dmat *) c_ptr; c_ptr += (N+1)*sizeof(struct blasfeo_dmat); // z_alg work->z_alg = (struct blasfeo_dvec *) c_ptr; c_ptr += (N+1)*sizeof(struct blasfeo_dvec); // blasfeo_mem align align_char_to(64, &c_ptr); // dzduxt for(ii=0; ii<=N; ii++) { blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], work->dzduxt+ii, c_ptr); c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); } // z_alg for(ii=0; ii<=N; ii++) { blasfeo_create_dvec(nz[ii], work->z_alg+ii, c_ptr); c_ptr += blasfeo_memsize_dvec(nz[ii]); } // assert & return assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr); return; } /************************************************ * functions ************************************************/ static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work) { ocp_nlp_config *config = (ocp_nlp_config *) config_; // loop index int ii; // extract dims int N = dims->N; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (ii = 0; ii <= N; ii++) { // cost config->cost[ii]->initialize(config->cost[ii], 
dims->cost[ii], nlp_in->cost[ii], opts->cost[ii], mem->cost[ii], work->cost[ii]); // dynamics if (ii < N) config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]); // constraints config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii], nlp_in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]); } return; } static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work) { ocp_nlp_config *config = (ocp_nlp_config *) config_; // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; ocp_nlp_memory *nlp_mem = mem->nlp_mem; /* stage-wise multiple shooting lagrangian evaluation */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // init Hessian to 0 blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, work->qp_in->RSQrq+i, 0, 0); // dynamics if (i < N) config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); // cost config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // constraints config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], nlp_in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } /* collect stage-wise evaluations */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i=0; i <= N; i++) { // nlp mem: cost_grad struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]); blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0); // nlp mem: dyn_fun if (i < N) { struct 
blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0); } // nlp mem: dyn_adj if (i < N) { struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]); blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0); } else { blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0); } if (i > 0) { struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]); blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i], nlp_mem->dyn_adj+i, nu[i]); } // nlp mem: ineq_fun struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0); // nlp mem: ineq_adj struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]); blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0); } // TODO(all): still to clean !!!!!!!!!!!!! for (i = 0; i <= N; i++) { // TODO(rien) where should the update happen??? move to qp update ??? // TODO(all): fix and move where appropriate // if(i<N) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme != NULL && opts->scheme->type != exact) // { // for (int_t j = 0; j < nx; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j]; // for (int_t j = 0; j < nu; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j]; // } // } } return; } // update QP rhs for SQP (step prim var, abs dual var) // TODO(all): move in dynamics, cost, constraints modules ??? 
// Copy the linearization results (cost gradient g, dynamics residual b,
// inequality residual d) from the NLP memory into the QP right-hand side.
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem,
    ocp_nlp_sqp_rti_work *work)
{
    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // g
        blasfeo_dveccp(nv[i], nlp_mem->cost_grad + i, 0, work->qp_in->rqz + i, 0);

        // b (continuity residual, only for stages with dynamics)
        if (i < N)
            blasfeo_dveccp(nx[i + 1], nlp_mem->dyn_fun + i, 0, work->qp_in->b + i, 0);

        // d
        blasfeo_dveccp(2 * ni[i], nlp_mem->ineq_fun + i, 0, work->qp_in->d + i, 0);
    }

    return;
}



// Apply the QP solution to the NLP iterate: a full (unit step length) step in
// the primal variables, and the QP duals taken over as the new NLP duals.
static void sqp_update_variables(ocp_nlp_dims *dims, ocp_nlp_out *nlp_out,
    ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_work *work)
{
    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;  // number of algebraic variables per stage

    // TODO(all): fix and move where appropriate
    // for (i = 0; i < N; i++)
    // {
    //     nx1 = dims->constraints[i+1]->nx;
    //     for (j = 0; j < nx1; j++)
    //     {
    //         work->sim_in[i]->S_adj[j] = -BLASFEO_DVECEL(&work->qp_out->pi[i], j);
    //     }
    // }

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // (full) step in primal variables
        blasfeo_daxpy(nv[i], 1.0, work->qp_out->ux + i, 0, nlp_out->ux + i, 0,
            nlp_out->ux + i, 0);

        // absolute in dual variables
        if (i < N)
            blasfeo_dveccp(nx[i + 1], work->qp_out->pi + i, 0, nlp_out->pi + i, 0);

        blasfeo_dveccp(2 * ni[i], work->qp_out->lam + i, 0, nlp_out->lam + i, 0);

        blasfeo_dveccp(2 * ni[i], work->qp_out->t + i, 0, nlp_out->t + i, 0);

        // algebraic variables of the last evaluation
        if (i < N)
            blasfeo_dveccp(nz[i], work->z_alg+i, 0, nlp_out->z+i, 0);
    }

    return;
}



// Simple fixed-step Gauss-Newton based SQP routine:
// performs exactly ONE real-time-iteration step (linearize once, solve one QP,
// take a full step). Returns ACADOS_SUCCESS or ACADOS_QP_FAILURE.
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void
*work_)
{
    // acados timer
    acados_timer timer0, timer1;

    // start timer (timer0 measures the whole call, timer1 individual phases)
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_rti_work *work = work_;

    ocp_nlp_sqp_rti_cast_workspace(config, dims, work, mem, opts);

    // zero timers
    double total_time = 0.0;
    mem->time_qp_sol = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    // extract dims
    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: wire the module memories to the shared
    // iterate (nlp_out) and QP workspace so the modules write in place
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(work->qp_in->BAbt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->dynamics[ii]);
        // config->dynamics[ii]->memory_set_z_alg_ptr(nlp_out->z+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(work->dzduxt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_guess_ptr(nlp_out->z+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(work->dzduxt+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(work->qp_in->Z + ii, mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(work->qp_in->DCt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii,
            mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(work->qp_in->idxb[ii],
            mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(work->qp_in->idxs[ii],
            mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, work->qp_in->RSQrq,
        mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, work->qp_in->rqz,
        mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, work->qp_in->BAbt,
        mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, work->qp_in->b,
        mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, work->qp_in->idxb,
        mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, work->qp_in->DCt,
        mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, work->qp_out->ux,
        mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, work->qp_out->pi,
        mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, work->qp_out->lam,
        mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    // NOTE: this loop declares its own ii, shadowing the outer one
    for (int ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);

    // SQP body

    // start timer
    acados_tic(&timer1);
    // linearizate NLP and update QP matrices
    linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);
    // stop timer
    mem->time_lin += acados_toc(&timer1);

    // update QP rhs for SQP (step prim var, abs dual var)
    sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);

    // save statistics
    // mem->stat[mem->stat_n*1+0] = qp_status;
    // mem->stat[mem->stat_n*1+1] = qp_iter;

    // start timer
    acados_tic(&timer1);
    // regularize Hessian
    config->regularize->regularize_hessian(config->regularize, dims->regularize,
        opts->regularize, mem->regularize_mem);
    // stop timer
    mem->time_reg += acados_toc(&timer1);

    // printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
    // print_ocp_qp_in(work->qp_in);
    // exit(1);

    // TODO no warm start across NLP solutions (yet)
    int tmp_int = 0;
    config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
        "warm_start", &tmp_int);

    // start timer
    acados_tic(&timer1);
    // TODO move qp_out in memory !!!!! (it has to be preserved to do warm start)
    qp_status = qp_solver->evaluate(qp_solver, work->qp_in, work->qp_out,
        opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
    // stop timer
    mem->time_qp_sol += acados_toc(&timer1);

    // start timer
    acados_tic(&timer1);
    // compute correct dual solution in case of Hessian regularization
    config->regularize->correct_dual_sol(config->regularize, dims->regularize,
        opts->regularize, mem->regularize_mem);
    // stop timer
    mem->time_reg += acados_toc(&timer1);

    // TODO move into QP solver memory ???
nlp_out->qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter; qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter; // compute external QP residuals (for debugging) if(opts->ext_qp_res) { ocp_qp_res_compute(work->qp_in, work->qp_out, work->qp_res, work->qp_res_ws); ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2)); // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]); } // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter); // print_ocp_qp_out(work->qp_out); // if(sqp_iter==1) // exit(1); // save statistics mem->stat[mem->stat_n*1+0] = qp_status; mem->stat[mem->stat_n*1+1] = qp_iter; if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { // print_ocp_qp_in(work->qp_in); // stop timer total_time += acados_toc(&timer0); mem->time_tot = total_time; nlp_out->total_time = total_time; printf("QP solver returned error status %d\n", qp_status); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_QP_FAILURE; return mem->status; } sqp_update_variables(dims, nlp_out, opts, mem, work); // ocp_nlp_dims_print(nlp_out->dims); // ocp_nlp_out_print(nlp_out); // exit(1); // stop timer total_time += acados_toc(&timer0); mem->time_tot = total_time; nlp_out->total_time = total_time; // ocp_nlp_out_print(nlp_out); // print_ocp_qp_in(work->qp_in); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_SUCCESS; return mem->status; } int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; // ocp_nlp_out *nlp_out = nlp_out_; // ocp_qp_xcond_solver_config *qp_solver = 
config->qp_solver; ocp_nlp_sqp_rti_work *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, work, mem, opts); // extract dims int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(fuck_lint) checks // TODO(fuck_lint) flag to enable/disable checks for (ii = 0; ii <= N; ii++) { // TODO(fuck_lint) check that ns in opt_var == ns in constraints } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_rti_get(void *config_, void *mem_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = 1; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else { printf("\nerror: output type %s not available in ocp_nlp_sqp_rti module\n", field); exit(1); } } void ocp_nlp_sqp_rti_config_initialize_default(void *config_) { 
ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_rti_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_rti_opts_update; config->opts_set = &ocp_nlp_sqp_rti_opts_set; config->dynamics_opts_set = &ocp_nlp_sqp_rti_dynamics_opts_set; config->cost_opts_set = &ocp_nlp_sqp_rti_cost_opts_set; config->constraints_opts_set = &ocp_nlp_sqp_rti_constraints_opts_set; config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_rti_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp_rti; config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default; config->precompute = &ocp_nlp_sqp_rti_precompute; config->get = &ocp_nlp_sqp_rti_get; return; }
GB_unaryop__abs_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint16_uint32
// op(A') function: GB_tran__abs_uint16_uint32

// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
// (note: ABS on an unsigned type is the identity, so the op is a plain copy)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint32 -> uint16 narrows by truncation modulo 2^16)
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise kernel: Cx [p] = (uint16_t) Ax [p] for p = 0..anz-1,
// parallelized statically over nthreads OpenMP threads.
GrB_Info GB_unop__abs_uint16_uint32
(
    uint16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body lives in GB_unaryop_transpose.c, specialized by the
// macros above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__abs_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
residualbased_elimination_builder_and_solver_componentwise.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//
//

#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE

/* System includes */
#include <set>

#ifdef _OPENMP
#include <omp.h>
#endif

/* External includes */

/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/global_pointer_variables.h"

namespace Kratos
{

/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name  Enum's */
/*@{ */
/*@} */
/**@name  Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */

/** Short class definition.

Detail class definition.

This is a specialization of the standard building strategy to the case in which
a single variable is to be used in the building.

The creation of the DofList and the construction of the system matrix is in
this case much faster as the neighborhood relationships are considered to be
known.

\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}

\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}

\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}

\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}


\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}

\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}

\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}

\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}

*/
template<class TSparseSpace,
         class TDenseSpace ,
         class TLinearSolver,
         class TVariableType
         >
class ResidualBasedEliminationBuilderAndSolverComponentwise
    : public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
    /**@name Type Definitions */
    /*@{ */
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationBuilderAndSolverComponentwise );

    typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;

    typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> ResidualBasedEliminationBuilderAndSolverType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef typename BaseType::NodesArrayType NodesArrayType;
    typedef typename BaseType::ElementsArrayType ElementsArrayType;
    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    typedef typename BaseType::ElementsContainerType ElementsContainerType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor. (with parameters)
     * Reads the component-wise variable name from the given Parameters and
     * resolves it through KratosComponents.
     */
    explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
    {
        // Validate default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "ResidualBasedEliminationBuilderAndSolverComponentwise",
            "components_wise_variable" : "SCALAR_VARIABLE_OR_COMPONENT"
        })" );

        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        rVar = KratosComponents<TVariableType>::Get(ThisParameters["components_wise_variable"].GetString());
    }

    /**
     * @brief Default constructor. Constructor.
     * Takes the variable to build for directly.
     */
    explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,TVariableType const& Var)
        : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
        , rVar(Var)
    {
        /* std::cout << "using the standard builder and solver " << std::endl; */
    }

    /** Destructor.
    */
    ~ResidualBasedEliminationBuilderAndSolverComponentwise() override {}


    /*@} */
    /**@name Operators */
    /*@{ */

    //**************************************************************************
    //**************************************************************************

    /** Assembles the global system matrix A and RHS vector b from all
     * elements and conditions of the model part, in parallel (one OpenMP
     * chunk per thread, with per-row locks guarding the assembly when
     * USE_LOCKS_IN_ASSEMBLY is defined).
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& b) override
    {
        KRATOS_TRY
        if(!pScheme)
            KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");

        //getting the elements from the model
        ElementsArrayType& pElements = r_model_part.Elements();

        //getting the array of the conditions
        ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

        //resetting to zero the vector of reactions
        TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );

        //create a partition of the element array
        int number_of_threads = OpenMPUtils::GetNumThreads();

#ifdef _OPENMP
        int A_size = A.size1();

        //creating an array of lock variables of the size of the system matrix
        std::vector< omp_lock_t > lock_array(A.size1());

        for(int i = 0; i<A_size; i++)
            omp_init_lock(&lock_array[i]);
#endif
        DenseVector<unsigned int> element_partition;
        CreatePartition(number_of_threads, pElements.size(), element_partition);
        if (this->GetEchoLevel()>0)
        {
            KRATOS_WATCH( number_of_threads );
            KRATOS_WATCH( element_partition );
        }

        double start_prod = OpenMPUtils::GetCurrentTime();

        #pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
        for(int k=0; k<number_of_threads; k++)
        {
            //contributions to the system
            LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
            LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

            //vector containing the localization in the system of the different
            //terms
            Element::EquationIdVectorType EquationId;
            ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
            typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];

            // dof position of rVar is the same on all nodes, so look it up once
            unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);

            // assemble all elements
            for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
            {
                //calculate elemental contribution
                (*it)->InitializeNonLinearIteration(CurrentProcessInfo);
                (*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);

                Geometry< Node<3> >& geom = (*it)->GetGeometry();
                if(EquationId.size() != geom.size())
                    EquationId.resize(geom.size(),false);

                for(unsigned int i=0; i<geom.size(); i++)
                    EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();

                //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
                this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
            }
        }

        DenseVector<unsigned int> condition_partition;
        CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);

        #pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
        for(int k=0; k<number_of_threads; k++)
        {
            //contributions to the system
            LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
            LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

            Condition::EquationIdVectorType EquationId;

            ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

            typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
            typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];

            unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);

            // assemble all conditions
            for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
            {
                //calculate elemental contribution
                (*it)->InitializeNonLinearIteration(CurrentProcessInfo);
                (*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);

                Geometry< Node<3> >& geom = (*it)->GetGeometry();
                if(EquationId.size() != geom.size())
                    EquationId.resize(geom.size(),false);

                for(unsigned int i=0; i<geom.size(); i++)
                {
                    EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
                }

#ifdef USE_LOCKS_IN_ASSEMBLY
                this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
                this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
            }
        }

        if (this->GetEchoLevel()>0)
        {
            double stop_prod = OpenMPUtils::GetCurrentTime();
            std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
        }

#ifdef _OPENMP
        for(int i = 0; i<A_size; i++)
            omp_destroy_lock(&lock_array[i]);
#endif

        KRATOS_CATCH("")
    }


    //**************************************************************************
    //**************************************************************************

    /** Builds the DOF set for the single variable rVar: collects all nodes
     * that have neighbours as "active" and pushes their rVar dof.
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part
    ) override
    {
        KRATOS_TRY

        //fills a list of "active" nodes defined as nodes which have neighbours
        // AND no fixed pressure
        mActiveNodes.clear();
        mActiveNodes.reserve(r_model_part.Nodes().size() );
        for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
        {
            if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
            {
                mActiveNodes.push_back(*(it.base() ));
            }
        }

        //fills the DofList and give a unique progressive tag to each node
        BaseType::mDofSet.clear();
        BaseType::mDofSet.reserve(mActiveNodes.size() );

        for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++)
        {
            BaseType::mDofSet.push_back( iii->pGetDof(rVar) );
        }

        //throws an exception if there are no Degrees of freedom involved in the analysis
        if (BaseType::mDofSet.size()==0)
            KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");

        BaseType::mDofSetIsInitialized = true;

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag())
        {
            for(auto dof_iterator =
BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) ); pA.swap(pNewA); } if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) ); pDx.swap(pNewDx); } if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) ); pb.swap(pNewb); } if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false); #ifdef _OPENMP ParallelConstructGraph(A); #else ConstructGraph(A); #endif } else { if(A.size1() != 
BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { //KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl; A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true); #ifdef _OPENMP ParallelConstructGraph(A); #else ConstructGraph(A); #endif } } if(Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize,false); if(b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize,false); // //if needed resize the vector for the calculation of reactions if(BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if(BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize,false); } //swapping pointers // pA.swap(pNewA); // pDx.swap(pNewDx); // pb.swap(pNewb); #ifndef __SUNPRO_CC KRATOS_CATCH("") #endif } //************************************************************************** //************************************************************************** void Clear() override { this->mDofSet = DofsArrayType(); if(this->mpReactionsVector != NULL) { TSparseSpace::Clear( (this->mpReactionsVector) ); } // *(this->mpReactionsVector) = TSystemVectorType(); if (this->GetEchoLevel()>1) { KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverComponentwise"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}

/*@} */
/**@name Friends */
/*@{ */

/*@} */

protected:
/**@name Protected static Member Variables */
/*@{ */

/*@} */
/**@name Protected member Variables */
/*@{ */

/*@} */
/**@name Protected Operators*/
/*@{ */

//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************

/**
 * @brief Serial construction of the sparsity graph of the system matrix A.
 * @details For every active node whose dof of rVar is free, one matrix row is
 * built whose column indices are the equation ids of the node itself and of
 * its free first neighbours (sorted, duplicates removed). The entries are
 * then pushed into A with value zero, row by row, in ascending column order
 * as required by A.push_back.
 * @param A the (already resized, empty) system matrix
 */
void ConstructGraph(TSystemMatrixType& A)
{
    KRATOS_TRY

    // one list of column indices per free equation
    std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);

    int total_size = 0;
    // position of rVar in the dof list — taken from the first active node;
    // assumes the position is the same for all nodes in mActiveNodes (TODO confirm)
    unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);

    //constructing the system matrix row by row
    int index_i;
    for(GlobalPointersVector< Node<3> >::iterator in = mActiveNodes.begin(); in!=mActiveNodes.end(); in++)
    {
        const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
        if( current_dof.IsFixed() == false)
        {
            index_i = (current_dof).EquationId();
            GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);

            std::vector<int>& indices = index_list[index_i];
            indices.reserve(neighb_nodes.size()+1);

            //filling the first neighbours list
            // the diagonal entry (the row's own equation id) always belongs to the pattern
            indices.push_back(index_i);
            for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
            {
                const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
                if(neighb_dof.IsFixed() == false )
                {
                    int index_j = (neighb_dof).EquationId();
                    indices.push_back(index_j);
                }
            }

            //sorting the indices and eliminating the duplicates
            std::sort(indices.begin(),indices.end());
            typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
            indices.erase(new_end,indices.end());

            total_size += indices.size();
        }
    }

    // reserve exactly the number of nonzeros counted above
    A.reserve(total_size,false);

    //setting to zero the matrix (and the diagonal matrix)
    for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
    {
        std::vector<int>& indices = index_list[i];
        for(unsigned int j=0; j<indices.size(); j++)
        {
            A.push_back(i,indices[j] , 0.00);
        }
    }

    KRATOS_CATCH("")
}

//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************

#ifdef _OPENMP
/**
 * @brief OpenMP variant of ConstructGraph: the active nodes are split into one
 * contiguous chunk per thread (via CreatePartition) and each thread builds the
 * index lists for its own rows; the final zero-initialized push into A is serial.
 * @details Each equation id is owned by exactly one node, so the per-row writes
 * to index_list do not overlap between threads — presumably the reason no
 * locking is used here (NOTE(review): relies on unique equation ids; confirm).
 * @param A the (already resized, empty) system matrix
 */
void ParallelConstructGraph(TSystemMatrixType& A)
{
#ifndef __SUNPRO_CC
    KRATOS_TRY
#endif
    std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);

    int number_of_threads = omp_get_max_threads();

    // dof position of rVar, taken from the first active node (see ConstructGraph)
    unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);

    //constructing the system matrix row by row
    DenseVector<unsigned int> partition;
    // per-thread nonzero counters, accumulated without synchronization
    DenseVector<unsigned int> local_sizes(number_of_threads);
    for(int i=0; i<number_of_threads; i++)
        local_sizes[i] = 0;

    CreatePartition(number_of_threads, mActiveNodes.size(), partition);

    #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1)
    for(int k=0; k<number_of_threads; k++)
    {
        // thread k processes the half-open node range [partition[k], partition[k+1])
        GlobalPointersVector< Node<3> >::iterator it_begin = mActiveNodes.begin()+partition[k];
        GlobalPointersVector< Node<3> >::iterator it_end = mActiveNodes.begin()+partition[k+1];

        for(GlobalPointersVector< Node<3> >::iterator in = it_begin; in!=it_end; in++)
        {
            const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
            if( current_dof.IsFixed() == false)
            {
                int index_i = (current_dof).EquationId();
                GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);

                std::vector<int>& indices = index_list[index_i];
                indices.reserve(neighb_nodes.size()+1);

                //filling the first neighbours list
                indices.push_back(index_i);
                for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
                {
                    const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
                    if(neighb_dof.IsFixed() == false
)
                    {
                        int index_j = (neighb_dof).EquationId();
                        indices.push_back(index_j);
                    }
                }

                //sorting the indices and eliminating the duplicates
                std::sort(indices.begin(),indices.end());
                typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
                indices.erase(new_end,indices.end());

                local_sizes[k] += indices.size();
            }
        }
    }

    //calculate the total size of the system
    int total_size = 0.0;
    for(int i=0; i<number_of_threads; i++)
        total_size += local_sizes[i];

    // reserve exactly the number of nonzeros counted by the threads
    A.reserve(total_size,false);

    //setting to zero the matrix (and the diagonal matrix)
    // serial pass: A.push_back requires rows (and columns within a row) in order
    for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
    {
        std::vector<int>& indices = index_list[i];
        for(unsigned int j=0; j<indices.size(); j++)
        {
            A.push_back(i,indices[j] , 0.00);
        }
    }
#ifndef __SUNPRO_CC
    KRATOS_CATCH("")
#endif
}
#endif

/*@} */
/**@name Protected Operations*/
/*@{ */

/*@} */
/**@name Protected Access */
/*@{ */

/*@} */
/**@name Protected Inquiry */
/*@{ */

/*@} */
/**@name Protected LifeCycle */
/*@{ */

/*@} */

private:
/**@name Static Member Variables */
/*@{ */

/*@} */
/**@name Member Variables */
/*@{ */

// the variable whose dofs this componentwise builder assembles (bound elsewhere,
// presumably at construction — TODO confirm against the constructor)
TVariableType const & rVar;
// nodes visited by ConstructGraph/ParallelConstructGraph; populated elsewhere —
// NOTE(review): must be non-empty before the graph is built (begin() is dereferenced)
GlobalPointersVector<Node<3> > mActiveNodes;

/*@} */
/**@name Private Operators*/
/*@{ */

//******************************************************************************************
//******************************************************************************************

/**
 * @brief Splits number_of_rows into number_of_threads contiguous chunks.
 * @details partitions[i] is the first row of chunk i and
 * partitions[number_of_threads] == number_of_rows, so chunk i is the half-open
 * range [partitions[i], partitions[i+1]). All chunks get the quotient of the
 * integer division; the last chunk absorbs the remainder.
 */
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for(unsigned int i = 1; i<number_of_threads; i++)
        partitions[i] = partitions[i-1] + partition_size ;
}

/*@} */
/**@name Private Operations*/
/*@{ */

/*@} */
/**@name Private Access */
/*@{ */

/*@} */
/**@name Private Inquiry */
/*@{ */

/*@} */
/**@name Un accessible methods */
/*@{ */

/*@} */

}; /* Class ResidualBasedEliminationBuilderAndSolverComponentwise */

/*@} */

/**@name Type Definitions */
/*@{ */

/*@} */

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
GB_builder.c
//------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper

// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.

// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.

// The work is done in 5 major Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the # of tuples on
// input. Let p be the # of threads used.

// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.

// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.

// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.

// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.

// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy Sx into T->x.

// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.

// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is known precisely from A->Pending->sorted. Step 2 does
// O((e log e)/p) work to sort the tuples. Duplicates may appear, and
// out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4
// does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5
// does O(e/p) read/writes per thread, or O(1) time if S_work can be
// transplanted into T->x.

// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5 // does O(e/p) read/writes per thread, but it uses the simpler case in // GB_reduce_build_template since no duplicates can appear. It is unlikely // able to transplant S_work into T->x since the input will almost always be // unsorted. // For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates // appear. Tuples are not sorted on input. I_work is transplanted into C->i. // J_work and S_work are freed on output. S_work is not transplanted into // C->x. // For iso inputs/outputs: T and Sx have the same iso property. If // they are iso, then dup is always NULL. Duplicates may or may not appear // if T and Sx are iso. // (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso // Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait. // Sx and Tx are not iso. Duplicates may appear. dup is always present // for GrB*build, but may be either NULL or non-NULL for GB_wait. // (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct // iso matrices. For those methods Sx and Tx are always iso, and no dup // operator is be passed in (dup is NULL here, which is the implied 2nd // operator). Duplicates may appear. // (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or // non-iso, and always passes in dup as NULL since there are no // duplicates. Sx and Tx are either both iso, or both non-iso. // This method always returns T as hypersparse, and T is iso if and only // if Sx is iso. #include "GB_build.h" #include "GB_sort.h" #include "GB_binop.h" #ifndef GBCOMPACT #include "GB_red__include.h" #endif #define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t]) #define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t])) #define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? 
t : K_work [t])) #define GB_FREE_WORK \ { \ GB_WERK_POP (Work, int64_t) ; \ GB_FREE (I_work_handle, *I_work_size_handle) ; \ GB_FREE (J_work_handle, *J_work_size_handle) ; \ GB_FREE (S_work_handle, *S_work_size_handle) ; \ GB_FREE_WERK (&K_work, K_work_size) ; \ } //------------------------------------------------------------------------------ // GB_builder //------------------------------------------------------------------------------ GrB_Info GB_builder // build a matrix from tuples ( GrB_Matrix T, // matrix to build, static or dynamic header const GrB_Type ttype, // type of output matrix T const int64_t vlen, // length of each vector of T const int64_t vdim, // number of vectors in T const bool is_csc, // true if T is CSC, false if CSR int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples size_t *I_work_size_handle, int64_t **J_work_handle, // for (j,i,k) tuples size_t *J_work_size_handle, GB_void **S_work_handle, // array of values of tuples, size ijslen, // or size 1 if S is iso size_t *S_work_size_handle, bool known_sorted, // true if tuples known to be sorted bool known_no_duplicates, // true if tuples known to not have dupl int64_t ijslen, // size of I_work and J_work arrays const bool is_matrix, // true if T a GrB_Matrix, false if vector const int64_t *restrict I_input,// original indices, size nvals const int64_t *restrict J_input,// original indices, size nvals const GB_void *restrict S_input,// array of values of tuples, size nvals, // or size 1 if S_input or S_work are iso const bool S_iso, // true if S_input or S_work are iso const int64_t nvals, // number of tuples, and size of K_work const GrB_BinaryOp dup, // binary function to assemble duplicates, // if NULL use the SECOND operator to // keep the most recent duplicate. 
const GrB_Type stype, // the type of S_work or S_input GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (T != NULL) ; // T is a static or dynamic header on input ASSERT (nvals >= 0) ; ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ; ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ; ASSERT (I_work_handle != NULL) ; ASSERT (J_work_handle != NULL) ; ASSERT (S_work_handle != NULL) ; ASSERT (!GB_OP_IS_POSITIONAL (dup)) ; ASSERT (I_work_size_handle != NULL) ; ASSERT (J_work_size_handle != NULL) ; ASSERT (S_work_size_handle != NULL) ; //-------------------------------------------------------------------------- // get Sx //-------------------------------------------------------------------------- GB_void *restrict S_work = (*S_work_handle) ; const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ; ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ; ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ; ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ; ASSERT (GB_IMPLIES (S_iso, nvals > 0)) ; //========================================================================== // symbolic phase of the build ============================================= //========================================================================== // The symbolic phase sorts the tuples and finds any duplicates. The // output matrix T is constructed (not including T->i and T->x), and T->h // and T->p are computed. Then I_work is transplanted into T->i, or T->i is // allocated. T->x is then allocated. It is not computed until the // numeric phase. // When this function returns, I_work is either freed or transplanted into // T->i. J_work is freed, and the I_work and J_work pointers (in the // caller) are set to NULL by setting their handles to NULL. 
Note that // J_work may already be NULL on input, if T has one or zero vectors // (J_work_handle is always non-NULL however). GrB_Info info ; int64_t *restrict I_work = (*I_work_handle) ; int64_t *restrict J_work = (*J_work_handle) ; int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ; ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_WERK_DECLARE (Work, int64_t) ; GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ; if (Work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } memset (Work, 0, Work_nitems * sizeof (int64_t)) ; int64_t *restrict tstart_slice = Work ; // nthreads+1 int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1 int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1 int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads //-------------------------------------------------------------------------- // partition the tuples for the threads //-------------------------------------------------------------------------- // Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1. // Each thread handles about the same number of tuples. This partition // depends only on nvals. GB_eslice (tstart_slice, nvals, nthreads) ; // tstart_slice [tid]: first tuple in slice tid // tnvec_slice [tid]: # of vectors that start in a slice. If a vector // starts in one slice and ends in another, it is // counted as being in the first slice. 
// tnz_slice [tid]: # of entries in a slice after removing duplicates // sentinel values for the final cumulative sum tnvec_slice [nthreads] = 0 ; tnz_slice [nthreads] = 0 ; // this becomes true if the first pass computes tnvec_slice and tnz_slice, // and if the (I_input,J_input) tuples were found to be already sorted with // no duplicates present. bool tnvec_and_tnz_slice_computed = false ; //-------------------------------------------------------------------------- // STEP 1: copy user input and check if valid //-------------------------------------------------------------------------- // If the indices are provided by (I_input,J_input), then import them into // (I_work,J_work) and check if they are valid, and sorted. If the input // happens to be already sorted, then duplicates are detected and the # of // vectors in each slice is counted. if (I_work == NULL) { //---------------------------------------------------------------------- // allocate I_work //---------------------------------------------------------------------- // allocate workspace to load and sort the index tuples: // vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k] // vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and // j = J_input [k]. If the tuples are found to be already sorted on // input, then J_work is not allocated, and J_input is used instead. // The k value in the tuple gives the position in the original set of // tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k] // for matrices with vdim > 1. // The workspace I_work and J_work are allocated here but freed (or // transplanted) inside GB_builder. K_work is allocated, used, and // freed in GB_builder. 
ASSERT (J_work == NULL) ; I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ; (*I_work_handle) = I_work ; ijslen = nvals ; if (I_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // create the tuples to sort, and check for any invalid indices //---------------------------------------------------------------------- known_sorted = true ; bool no_duplicates_found = true ; if (nvals == 0) { // nothing to do } else if (is_matrix) { //------------------------------------------------------------------ // C is a matrix; check both I_input and J_input //------------------------------------------------------------------ ASSERT (J_input != NULL) ; ASSERT (I_work != NULL) ; ASSERT (vdim >= 0) ; ASSERT (I_input != NULL) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t my_tnvec = 0 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i,j) int64_t i = I_input [k] ; int64_t j = J_input [k] ; if (i < 0 || i >= vlen || j < 0 || j >= vdim) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && ((jlast < j) || (jlast == j && ilast <= i)) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(jlast == j && ilast == i)) ; // copy the tuple into I_work. J_work is done later. 
I_work [k] = i ; if (j > jlast) { // vector j starts in this slice (but this is // valid only if J_input is sorted on input) my_tnvec++ ; } // log the last index seen ilast = i ; jlast = j ; } // these are valid only if I_input and J_input are sorted on // input, with no duplicates present. tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = kend - kstart ; } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index int64_t i = I_input [kbad [tid]] ; int64_t j = J_input [kbad [tid]] ; int64_t row = is_csc ? i : j ; int64_t col = is_csc ? j : i ; int64_t nrows = is_csc ? vlen : vdim ; int64_t ncols = is_csc ? vdim : vlen ; GB_FREE_WORK ; GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, "index (" GBd "," GBd ") out of bounds," " must be < (" GBd ", " GBd ")", row, col, nrows, ncols) ; } } // if the tuples were found to be already in sorted order, and if // no duplicates were found, then tnvec_slice and tnz_slice are now // valid, Otherwise, they can only be computed after sorting. tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ; //------------------------------------------------------------------ // allocate J_work, if needed //------------------------------------------------------------------ if (vdim > 1 && !known_sorted) { // copy J_input into J_work, so the tuples can be sorted J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ; (*J_work_handle) = J_work ; if (J_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads); } else { // J_work is a shallow copy of J_input. The pointer is not // copied into (*J_work_handle), so it will not be freed. // J_input is not modified, even though it is typecast to the // int64_t *J_work, since J_work is not modified in this case. 
J_work = (int64_t *) J_input ; } } else { //------------------------------------------------------------------ // C is a typecasted GrB_Vector; check only I_input //------------------------------------------------------------------ ASSERT (I_input != NULL) ; ASSERT (J_input == NULL) ; ASSERT (vdim == 1) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i) int64_t i = I_input [k] ; if (i < 0 || i >= vlen) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && (ilast <= i) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(ilast == i)) ; // copy the tuple into the work arrays to be sorted I_work [k] = i ; // log the last index seen ilast = i ; } } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index int64_t i = I_input [kbad [tid]] ; GB_FREE_WORK ; GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, "index (" GBd ") out of bounds, must be < (" GBd ")", i, vlen) ; } } } //---------------------------------------------------------------------- // determine if duplicates are possible //---------------------------------------------------------------------- // The input is now known to be sorted, or not. If it is sorted, and // if no duplicates were found, then it is known to have no duplicates. // Otherwise, duplicates might appear, but a sort is required first to // check for duplicates. 
known_no_duplicates = known_sorted && no_duplicates_found ; } //-------------------------------------------------------------------------- // STEP 2: sort the tuples in ascending order //-------------------------------------------------------------------------- // If the tuples are known to already be sorted, Step 2 is skipped. In // that case, K_work is NULL (not allocated), which implicitly means that // K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and // Tx are iso. if (!known_sorted) { //---------------------------------------------------------------------- // allocate K_work workspace (not needed if T and Sx are iso) //---------------------------------------------------------------------- if (!S_iso) { // create the k part of each tuple K_work = GB_MALLOC_WERK (nvals, int64_t, &K_work_size) ; if (K_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } // The k part of each tuple (i,k) or (j,i,k) records the original // position of the tuple in the input list. This allows an // unstable sorting algorithm to be used. Since k is unique, it // forces the result of the sort to be stable regardless of whether // or not the sorting algorithm is stable. It also keeps track of // where the numerical value of the tuple can be found; it is in // Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the // tuple appears in the list after it is sorted. 
int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < nvals ; k++) { K_work [k] = k ; } } //---------------------------------------------------------------------- // sort all the tuples //---------------------------------------------------------------------- if (vdim > 1) { //------------------------------------------------------------------ // sort a set of (j,i,k) tuples //------------------------------------------------------------------ if (S_iso) { // K_work is NULL; only sort (j,i) info = GB_msort_2 (J_work, I_work, nvals, nthreads) ; } else { info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ; } #ifdef GB_DEBUG if (info == GrB_SUCCESS) { int64_t ilast = -1 ; int64_t jlast = -1 ; for (int64_t k = 0 ; k < nvals ; k++) { int64_t i = I_work [k] ; int64_t j = J_work [k] ; ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ; ilast = i ; jlast = j ; } } #endif } else { //------------------------------------------------------------------ // sort a set of (i,k) tuples //------------------------------------------------------------------ if (S_iso) { // K_work is NULL; only sort (i) info = GB_msort_1 (I_work, nvals, nthreads) ; } else { info = GB_msort_2 (I_work, K_work, nvals, nthreads) ; } #ifdef GB_DEBUG if (info == GrB_SUCCESS) { int64_t ilast = -1 ; for (int64_t k = 0 ; k < nvals ; k++) { int64_t i = I_work [k] ; ASSERT (ilast <= i) ; ilast = i ; } } #endif } if (info != GrB_SUCCESS) { // out of memory in GB_msort_* GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // STEP 3: count vectors and duplicates in each slice //-------------------------------------------------------------------------- // Duplicates are located, counted and their indices negated. The # of // vectors in each slice is counted. If the indices are known to not have // duplicates, then only the vectors are counted. 
Counting the # of // vectors is skipped if already done by Step 1. if (known_no_duplicates) { //---------------------------------------------------------------------- // no duplicates: just count # vectors in each slice //---------------------------------------------------------------------- // This is much faster, particularly if the # of vectors in each slice // has already been computed. #ifdef GB_DEBUG { // assert that there are no duplicates int64_t ilast = -1, jlast = -1 ; for (int64_t t = 0 ; t < nvals ; t++) { int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ; bool is_duplicate = (i == ilast && j == jlast) ; ASSERT (!is_duplicate) ; ilast = i ; jlast = j ; } } #endif if (vdim <= 1) { // all tuples appear in at most one vector, and there are no // duplicates, so there is no need to scan I_work or J_work. for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; tnvec_slice [tid] = 0 ; tnz_slice [tid] = tend - tstart ; } tnvec_slice [0] = (nvals == 0) ? 0 : 1 ; } else { // count the # of unique vector indices in J_work. No need to scan // I_work since there are no duplicates to be found. Also no need // to compute them if already found in Step 1. 
if (!tnvec_and_tnz_slice_computed) { int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = J_work [t] ; if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = tend - tstart ; } } } } else { //---------------------------------------------------------------------- // look for duplicates and count # vectors in each slice //---------------------------------------------------------------------- for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; ilast_slice [tid] = GB_I_WORK (tstart-1) ; } int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t my_ndupl = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t ilast = ilast_slice [tid] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; // tuples are now sorted but there may be duplicates ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ; // check if (j,i,k) is a duplicate if (i == ilast && j == jlast) { // flag the tuple as a duplicate I_work [t] = -1 ; my_ndupl++ ; // the sort places earlier duplicate tuples (with smaller // k) after later ones (with larger k). 
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ; } else { // this is a new tuple if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } ilast = i ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = (tend - tstart) - my_ndupl ; } } //-------------------------------------------------------------------------- // find total # of vectors and duplicates in all tuples //-------------------------------------------------------------------------- // Replace tnvec_slice with its cumulative sum, after which each slice tid // will be responsible for the # vectors in T that range from tnvec_slice // [tid] to tnvec_slice [tid+1]-1. GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ; int64_t tnvec = tnvec_slice [nthreads] ; // Replace tnz_slice with its cumulative sum GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ; // find the total # of final entries, after assembling duplicates int64_t tnz = tnz_slice [nthreads] ; int64_t ndupl = nvals - tnz ; //-------------------------------------------------------------------------- // allocate T; always hypersparse //-------------------------------------------------------------------------- // allocate T; allocate T->p and T->h but do not initialize them. // T is always hypersparse. The header T always exists on input, as // either a static or dynamic header. 
bool static_header = T->static_header ; info = GB_new (&T, static_header, // always hyper, static or dynamic header ttype, vlen, vdim, GB_Ap_malloc, is_csc, GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_WORK ; return (info) ; } ASSERT (T->p != NULL) ; ASSERT (T->h != NULL) ; ASSERT (T->b == NULL) ; ASSERT (T->i == NULL) ; ASSERT (T->x == NULL) ; T->iso = S_iso ; // OK: T is iso if and only if Sx is iso bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ; if (do_burble) { if (S_iso) { GBURBLE ("(iso build) ") ; } else { GBURBLE ("(build) ") ; } } //-------------------------------------------------------------------------- // STEP 4: construct the vector pointers and hyperlist for T //-------------------------------------------------------------------------- // Step 4 scans the J_work indices and constructs T->h and T->p. int64_t *restrict Th = T->h ; int64_t *restrict Tp = T->p ; if (vdim <= 1) { //---------------------------------------------------------------------- // special case for vectors //---------------------------------------------------------------------- ASSERT (tnvec == 0 || tnvec == 1) ; if (tnvec > 0) { Th [0] = 0 ; Tp [0] = 0 ; } } else if (ndupl == 0) { //---------------------------------------------------------------------- // no duplicates appear //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = GB_J_WORK (t) ; if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = t ; my_tnvec++ ; jlast = j ; } } } } else { //---------------------------------------------------------------------- // it is 
known that at least one duplicate appears //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnz = tnz_slice [tid] ; int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; if (i >= 0) { // this is a new tuple if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = my_tnz ; my_tnvec++ ; jlast = j ; } my_tnz++ ; } } } } // log the end of the last vector T->nvec_nonempty = tnvec ; T->nvec = tnvec ; Tp [tnvec] = tnz ; ASSERT (T->nvec == T->plen) ; T->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // free J_work if it exists //-------------------------------------------------------------------------- ASSERT (J_work_handle != NULL) ; GB_FREE (J_work_handle, *J_work_size_handle) ; J_work = NULL ; //-------------------------------------------------------------------------- // allocate T->i //-------------------------------------------------------------------------- if (ndupl == 0) { // shrink I_work from size ijslen to size tnz if (tnz < ijslen) { // this cannot fail since the size is shrinking. bool ok ; GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context); ASSERT (ok) ; } // transplant I_work into T->i T->i = I_work ; T->i_size = (*I_work_size_handle) ; I_work = NULL ; (*I_work_handle) = NULL ; (*I_work_size_handle) = 0 ; } else { // duplicates exist, so allocate a new T->i. 
I_work must be freed later T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ; if (T->i == NULL) { // out of memory GB_phbix_free (T) ; GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } } int64_t *restrict Ti = T->i ; //========================================================================== // numerical phase of the build: assemble any duplicates //========================================================================== // The tuples have been sorted. Assemble any duplicates with a switch // factory of built-in workers, or four generic workers. The vector // pointers T->p and hyperlist T->h (if hypersparse) have already been // computed. // If there are no duplicates, T->i holds the row indices of the tuple. // Otherwise, the row indices are still in I_work. K_work holds the // positions of each tuple in the array Sx. The tuples are sorted so that // duplicates are adjacent to each other and they appear in the order they // appeared in the original tuples. This method assembles the duplicates // and computes T->i and T->x from I_work, K_work, and Sx. into T, becoming // T->i. If no duplicates appear, T->i is already computed, and Sx just // needs to be copied and permuted into T->x. // The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or // T->i, and (2) K_work, and an array Sx of numerical values. Sx has not // been sorted, nor even accessed yet. It is identical to the original // unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the // position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in // the matrix T, and duplicates (if any) are assembled via the dup // operator. //-------------------------------------------------------------------------- // get opcodes and check types //-------------------------------------------------------------------------- // With GB_build, there can be 1 to 2 different types. // T->type is identical to the types of x,y,z for z=dup(x,y). 
// dup is never NULL and all its three types are the same // The type of Sx (stype) can different but must be compatible // with T->type // With GB_wait, there can be 1 to 5 different types: // The pending tuples are in Sx, of type stype which must be // compatible with dup->ytype and T->type // z = dup (x,y): can be NULL or have 1 to 3 different types // T->type: must be compatible with all above types. // dup may be NULL, in which case it is assumed be the implicit SECOND // operator, with all three types equal to T->type GrB_Type xtype, ytype, ztype ; GxB_binary_function fdup ; #ifndef GBCOMPACT GB_Opcode opcode ; #endif GB_Type_code tcode = ttype->code ; const size_t tsize = ttype->size ; bool op_2nd ; ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ; if (dup == NULL) { //---------------------------------------------------------------------- // dup is the implicit SECOND operator //---------------------------------------------------------------------- // z = SECOND (x,y) where all three types are the same as ttype // T(i,j) = (ttype) Sx(k) will be done for all tuples. #ifndef GBCOMPACT opcode = GB_SECOND_opcode ; #endif xtype = ttype ; ytype = ttype ; ztype = ttype ; fdup = NULL ; op_2nd = true ; ASSERT (GB_op_is_second (dup, ttype)) ; } else { //---------------------------------------------------------------------- // dup is an explicit operator //---------------------------------------------------------------------- // T(i,j) = (ttype) Sx[k] will be done for the first tuple. 
// for subsequent tuples: T(i,j) += Sx[k], via the dup operator and // typecasting: // // y = (dup->ytype) Sx[k] // x = (dup->xtype) T(i,j) // z = (dup->ztype) dup (x,y) // T(i,j) = (ttype) z ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ; ASSERT (!S_iso) ; #ifndef GBCOMPACT opcode = dup->opcode ; #endif xtype = dup->xtype ; ytype = dup->ytype ; ztype = dup->ztype ; fdup = dup->function ; op_2nd = GB_op_is_second (dup, ttype) ; } //-------------------------------------------------------------------------- // get the sizes and codes of each type //-------------------------------------------------------------------------- GB_Type_code zcode = ztype->code ; GB_Type_code xcode = xtype->code ; GB_Type_code ycode = ytype->code ; ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j) ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z size_t zsize = ztype->size ; size_t xsize = xtype->size ; size_t ysize = ytype->size ; // no typecasting if all 5 types are the same bool nocasting = (ttype == stype) && (ttype == xtype) && (ttype == ytype) && (ttype == ztype) ; ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ; ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ; ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ; ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ; ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ; //-------------------------------------------------------------------------- // STEP 5: assemble the tuples //-------------------------------------------------------------------------- bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ; if (copy_S_into_T && S_work != NULL) { //---------------------------------------------------------------------- // transplant S_work into T->x //---------------------------------------------------------------------- // No 
typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to copy Sx // into Tx. Sx can be directly transplanted into T->x since Sx is // provided as S_work. GB_builder must either transplant or free // S_work. The transplant can be used by GB_wait, whenever the tuples // are already sorted, with no duplicates, and no typecasting is // needed, since S_work is always A->Pending->x. T and Sx may be iso // or non-iso. T->x = S_work ; T->x_size = (*S_work_size_handle) ; S_work = NULL ; (*S_work_handle) = NULL ; (*S_work_size_handle) = 0 ; int64_t tx_size_required = tnz * tsize ; if (2 * tx_size_required < T->x_size) { // shrink the size of T->x bool ok = true ; GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok, Context) ; } } else { //---------------------------------------------------------------------- // allocate T->x //---------------------------------------------------------------------- T->x = GB_XALLOC (S_iso, tnz, tsize, &(T->x_size)) ; if (T->x == NULL) { // out of memory GB_phbix_free (T) ; GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } GB_void *restrict Tx = (GB_void *) T->x ; ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ; if (nvals == 0) { // nothing to do } else if (copy_S_into_T) { //------------------------------------------------------------------ // copy Sx into T->x //------------------------------------------------------------------ // No typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to // copy Sx into Tx. Sx cannot be transplanted into T->x since // S_work is NULL and S_input cannot be modified by GB_builder. ASSERT (S_work == NULL) ; ASSERT (Sx == S_input) ; GB_memcpy (Tx, Sx, (S_iso ? 
1 : nvals) * tsize, nthreads) ; } else if (nocasting) { //------------------------------------------------------------------ // assemble the values, Sx, into T, no typecasting needed //------------------------------------------------------------------ // Sx (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. // There are 44 common cases of this function for built-in types // and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types // (all but boolean; and OR, AND, XOR, and EQ for boolean. // In addition, the FIRST and SECOND operators are hard-coded, for // another 22 workers, since SECOND is used by GB_wait and since // FIRST is useful for keeping the first tuple seen. It is // controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they // do not appear in GB_reduce_to_* where the FIRST and SECOND // operators are not needed. // Early exit cannot be exploited, so the terminal is ignored. 
bool done = false ; if (S_iso) { //-------------------------------------------------------------- // T and Sx are iso; set iso value and delete duplicates //-------------------------------------------------------------- memcpy (Tx, Sx, tsize) ; #define GB_ISO_BUILD #include "GB_reduce_build_template.c" done = true ; } else { //-------------------------------------------------------------- // T and Sx are not iso; call in the workers //-------------------------------------------------------------- #ifndef GBCOMPACT //---------------------------------------------------------- // define the worker for the switch factory //---------------------------------------------------------- #define GB_INCLUDE_SECOND_OPERATOR #define GB_red(opname,aname) \ GB (_red_build_ ## opname ## aname) #define GB_RED_WORKER(opname,aname,atype) \ { \ info = GB_red (opname, aname) ((atype *) Tx, Ti, \ (atype *) Sx, nvals, ndupl, I_work, K_work, \ tstart_slice, tnz_slice, nthreads) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; //---------------------------------------------------------- // launch the switch factory //---------------------------------------------------------- // controlled by opcode and typecode GB_Type_code typecode = tcode ; #include "GB_red_factory.c" #endif } //------------------------------------------------------------------ // generic worker //------------------------------------------------------------------ if (!done) { if (do_burble) GBURBLE ("(generic build) ") ; //-------------------------------------------------------------- // no typecasting, but use the fdup function pointer and memcpy //-------------------------------------------------------------- // Either the fdup operator or type of Sx and T are // user-defined, or fdup is not an associative operator handled // by the GB_red_factory, or some combination of these // conditions. User-defined types cannot be typecasted, so // this handles all user-defined types. 
// Tx [p] = (ttype) Sx [k], but with no typecasting #undef GB_CAST_ARRAY_TO_ARRAY #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ; if (op_2nd) { //---------------------------------------------------------- // dup is the SECOND operator, with no typecasting //---------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) #include "GB_reduce_build_template.c" } else { //---------------------------------------------------------- // dup is another operator, with no typecasting needed //---------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but with no typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize)); #include "GB_reduce_build_template.c" } } } else { //------------------------------------------------------------------ // assemble the values Sx into T, typecasting as needed //------------------------------------------------------------------ if (do_burble) { GBURBLE ("(generic build with typecast) ") ; } // If T and Sx are iso, no typecasting is ever done, so this method // is not used in that case. ASSERT (!S_iso) ; // Sx (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. Not all of the 5 types are // the same, but all of them are built-in since user-defined types // cannot be typecasted. 
const GB_Type_code scode = stype->code ; const size_t ssize = stype->size ; GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ; GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ; GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ; GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ; ASSERT (scode <= GB_FC64_code) ; ASSERT (tcode <= GB_FC64_code) ; ASSERT (xcode <= GB_FC64_code) ; ASSERT (ycode <= GB_FC64_code) ; ASSERT (zcode <= GB_FC64_code) ; // Tx [p] = (ttype) Sx [k], with typecasting #undef GB_CAST_ARRAY_TO_ARRAY #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ; if (op_2nd) { //-------------------------------------------------------------- // dup operator is the SECOND operator, with typecasting //-------------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) #include "GB_reduce_build_template.c" } else { //-------------------------------------------------------------- // dup is another operator, with typecasting required //-------------------------------------------------------------- // Tx [p] += Sx [k], with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ { \ /* ywork = (ytype) Sx [k] */ \ GB_void ywork [GB_VLA(ysize)] ; \ cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \ /* xwork = (xtype) Tx [p] */ \ GB_void xwork [GB_VLA(xsize)] ; \ cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \ /* zwork = f (xwork, ywork) */ \ GB_void zwork [GB_VLA(zsize)] ; \ fdup (zwork, xwork, ywork) ; \ /* Tx [tnz-1] = (ttype) zwork */ \ cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \ } #include "GB_reduce_build_template.c" } } } //-------------------------------------------------------------------------- // free workspace and return result 
//-------------------------------------------------------------------------- GB_FREE_WORK ; T->jumbled = false ; ASSERT_MATRIX_OK (T, "T built", GB0) ; ASSERT (GB_IS_HYPERSPARSE (T)) ; return (GrB_SUCCESS) ; }
hmacMD5_fmt_plug.c
/* * This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com> * and (c) magnum 2011-2015, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hmacMD5; #elif FMT_REGISTERS_H john_register_one(&fmt_hmacMD5); #else #include <string.h> #include "arch.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT #endif #endif #include "misc.h" #include "common.h" #include "formats.h" #include "md5.h" #include "aligned.h" #include "simd-intrinsics.h" #include "base64_convert.h" #include "memdbg.h" #define FORMAT_LABEL "HMAC-MD5" #define FORMAT_NAME "" #ifdef SIMD_COEF_32 #define MD5_N (SIMD_PARA_MD5 * SIMD_COEF_32) #endif #define ALGORITHM_NAME "password is key, MD5 " MD5_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define PAD_SIZE 64 #define PAD_SIZE_W (PAD_SIZE/4) #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #ifdef SIMD_COEF_32 #define SALT_LIMBS 3 /* 3 limbs, 183 bytes */ #define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 9) #define SALT_ALIGN MEM_ALIGN_SIMD #else #define SALT_LENGTH 1023 #define SALT_ALIGN 1 #endif #define CIPHERTEXT_LENGTH (2 * SALT_LENGTH + 2 * BINARY_SIZE) #define HEXCHARS "0123456789abcdef" #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N #define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + ((i&63) & 3) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"what do ya want for nothing?#750c783e6ab0b503eaa86e310a5db738", "Jefe"}, {"YT1m11GDMm3oze0EdqO3FZmATSrxhquB#6c97850b296b34719b7cea5c0c751e22", ""}, 
{"2shXeqDlLdZ2pSMc0CBHfTyA5a9TKuSW#dfeb02c6f8a9ce89b554be60db3a2333", "magnum"}, {"#74e6f7298a9c2d168935f58c001bad88", ""}, {"The quick brown fox jumps over the lazy dog#80070713463e7749b90c2dc24911e275", "key"}, {"Beppe Grillo#F8457C3046C587BBCBD6D7036BA42C81", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {"$cram_md5$PG5vLXJlcGx5QGhhc2hjYXQubmV0Pg==$dXNlciA0NGVhZmQyMmZlNzY2NzBmNmIyODc5MDgxYTdmNWY3MQ==", "hashcat"}, {"MEaEObR2JNXgchVn93GLLH1Ud4qTzuC0#9a80bea0acd72231ea043210a173ec7f", "123"}, {"d2BbCbiSXTlglEstbFFlrRgPhR1KUa2s#7a553738bc4997e656329c1b1ef99e4f", "123456789"}, {"dBTmX1AdmnWyVkMKp7BEt4O3eBktdN2S#f6af0afd4f397504c3bfa3836bc04a0f", "passWOrd"}, {"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#050a9dee01b2302914b2a78346721d9b", "magnum"}, {"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#e4d0097fdc52f6fc50545d832784232d", "MaxLenSaltUsed"}, {NULL} }; #ifdef SIMD_COEF_32 static unsigned char *crypt_key; static unsigned char *ipad, *prep_ipad; static unsigned char *opad, *prep_opad; typedef struct cur_salt_t { unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT]; int salt_len; } cur_salt_t; static cur_salt_t *cur_salt; static int bufsize; #define SALT_SIZE sizeof(cur_salt_t) #else static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)]; static unsigned char (*ipad)[PAD_SIZE]; static unsigned char (*opad)[PAD_SIZE]; static unsigned char cur_salt[SALT_LENGTH+1]; static MD5_CTX *ipad_ctx; static MD5_CTX *opad_ctx; #define SALT_SIZE sizeof(cur_salt) #endif static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int new_keys; #ifdef SIMD_COEF_32 static void clear_keys(void) { memset(ipad, 0x36, bufsize); memset(opad, 0x5C, bufsize); } #endif static void init(struct fmt_main *self) { #ifdef 
SIMD_COEF_32 unsigned int i; #endif #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE; crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); prep_opad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); for (i = 0; i < self->params.max_keys_per_crypt; ++i) { crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80; ((unsigned int*)crypt_key)[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (BINARY_SIZE + PAD_SIZE) << 3; } clear_keys(); #else crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad)); opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad)); ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx)); opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx)); #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); } static void done(void) { MEM_FREE(saved_plain); #ifdef SIMD_COEF_32 MEM_FREE(prep_opad); MEM_FREE(prep_ipad); #else MEM_FREE(opad_ctx); MEM_FREE(ipad_ctx); #endif MEM_FREE(opad); MEM_FREE(ipad); MEM_FREE(crypt_key); } /* Convert from Base64 format with tag to our legacy format */ static char *prepare(char *split_fields[10], struct fmt_main *self) { char *p = split_fields[1]; if (!strncmp(p, "$cram_md5$", 10)) { static char out[256]; int len, len2; char *d, *o = out; p += 10; memset(out, 0, sizeof(out)); if (!(d = strchr(p, '$'))) return split_fields[1]; len = base64_convert(p, e_b64_mime, (int)(d - p - 1), o, e_b64_raw, sizeof(out), 
flg_Base64_MIME_TRAIL_EQ, 0); if (len > sizeof(out)-2) return split_fields[1]; o += len; *o++ = '#'; d++; len2 = base64_convert(d, e_b64_mime, strlen(d), o, e_b64_raw, sizeof(out) - len - 2, flg_Base64_MIME_TRAIL_EQ, 0); if (len2 > sizeof(out) - len - 3) return split_fields[1]; len = len2; if (!(p = strchr(o, ' '))) return split_fields[1]; p++; if (p-o >= len) return split_fields[1]; memmove(o, p, len - (p - o) + 1); if (strlen(o) == BINARY_SIZE * 2) return out; } return p; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; if (strstr(ciphertext, "$SOURCE_HASH$")) return ciphertext; strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(strrchr(out, '#')); return out; } static int valid(char *ciphertext, struct fmt_main *self) { int pos, i; char *p; if (!strncmp(ciphertext, "$cram_md5$", 10)) { char *f[10]; f[1] = ciphertext; ciphertext = prepare(f, self); } p = strrchr(ciphertext, '#'); // allow # in salt if (!p || p > &ciphertext[strlen(ciphertext) - 1]) return 0; i = (int)(p - ciphertext); if (i > SALT_LENGTH) return 0; pos = i + 1; if (strlen(ciphertext+pos) != BINARY_SIZE * 2) return 0; for (i = pos; i < BINARY_SIZE*2+pos; i++) { if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = salt; #else strcpy((char*)cur_salt, (char*)salt); #endif } static void set_key(char *key, int index) { int len; #ifdef SIMD_COEF_32 uint32_t *ipadp = (uint32_t*)&ipad[GETPOS(0, index)]; uint32_t *opadp = (uint32_t*)&opad[GETPOS(0, index)]; const uint32_t *keyp = (uint32_t*)key; unsigned int temp; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; if (len > PAD_SIZE) { unsigned char k0[BINARY_SIZE]; MD5_CTX ctx; int i; MD5_Init(&ctx); MD5_Update(&ctx, key, len); MD5_Final(k0, &ctx); keyp = 
(unsigned int*)k0; for (i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32) { temp = *keyp++; *ipadp ^= temp; *opadp ^= temp; } } else while((unsigned char)(temp = *keyp++)) { if (!(temp & 0xff00) || !(temp & 0xff0000)) { *ipadp ^= (unsigned short)temp; *opadp ^= (unsigned short)temp; break; } *ipadp ^= temp; *opadp ^= temp; if (!(temp & 0xff000000)) break; ipadp += SIMD_COEF_32; opadp += SIMD_COEF_32; } #else int i; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; memset(ipad[index], 0x36, PAD_SIZE); memset(opad[index], 0x5C, PAD_SIZE); if (len > PAD_SIZE) { MD5_CTX ctx; unsigned char k0[BINARY_SIZE]; MD5_Init(&ctx); MD5_Update(&ctx, key, len); MD5_Final(k0, &ctx); len = BINARY_SIZE; for (i = 0; i < len; i++) { ipad[index][i] ^= k0[i]; opad[index][i] ^= k0[i]; } } else for (i = 0; i < len; i++) { ipad[index][i] ^= key[i]; opad[index][i] ^= key[i]; } #endif new_keys = 1; } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y = 0; for (; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++) for (x = 0; x < SIMD_COEF_32; x++) { // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32 * PAD_SIZE_W]) return 1; } return 0; #else int index = 0; #if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1) for (index = 0; index < count; index++) #endif if (((uint32_t*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 int i; for (i = 0; i < (BINARY_SIZE/4); i++) // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32]) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); 
#endif } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #if _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_32 int i; if (new_keys) { SIMDmd5body(&ipad[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN); SIMDmd5body(&opad[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN); } SIMDmd5body(cur_salt->salt[0], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++) { SIMDmd5body(cur_salt->salt[i], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT); } SIMDmd5body(&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); #else MD5_CTX ctx; if (new_keys) { MD5_Init(&ipad_ctx[index]); MD5_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); MD5_Init(&opad_ctx[index]); MD5_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); MD5_Update(&ctx, cur_salt, strlen((char*)cur_salt)); MD5_Final((unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); MD5_Update(&ctx, crypt_key[index], BINARY_SIZE); MD5_Final((unsigned char*) crypt_key[index], &ctx); #endif } new_keys = 0; return count; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; // allow # in salt p = strrchr(ciphertext, '#') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } 
return (void*)out; } static void *get_salt(char *ciphertext) { static unsigned char salt[SALT_LENGTH+1]; int len; #ifdef SIMD_COEF_32 unsigned int i = 0; static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt; int salt_len = 0; #endif // allow # in salt len = strrchr(ciphertext, '#') - ciphertext; memset(salt, 0, sizeof(salt)); memcpy(salt, ciphertext, len); #ifdef SIMD_COEF_32 memset(&cur_salt, 0, sizeof(cur_salt)); while(((unsigned char*)salt)[salt_len]) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len]; ++salt_len; } cur_salt.salt_len = salt_len; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80; ((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3; } return &cur_salt; #else return salt; #endif } struct fmt_main fmt_hmacMD5 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE | FMT_HUGE_INPUT, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, #ifdef SIMD_COEF_32 clear_keys, #else fmt_default_clear_keys, #endif crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
decoder.c
/*! @file * @brief * * @version 1.0.0 * * (C) Copyright 2017 GoPro Inc (http://gopro.com/). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "config.h" #include "timing.h" #if WARPSTUFF #include "WarpLib.h" #endif //#include <stdlib.h> #include <stddef.h> #include <math.h> #include <memory.h> #include <time.h> //#include <stdint.h> #ifndef DEBUG #define DEBUG (1 && _DEBUG) #endif #ifndef TIMING #define TIMING (1 && _TIMING) #endif #ifndef XMMOPT #define XMMOPT (1 && _XMMOPT) #endif #define GEN_LICENSE 0 #ifndef PI #define PI 3.14159265359f #endif #ifdef _WIN32 #include <windows.h> #elif __APPLE__ #include "macdefs.h" #else #ifndef ZeroMemory #define ZeroMemory(p,s) memset(p,0,s) #endif #endif #include <stdio.h> #include <assert.h> #include <emmintrin.h> // Intel aligned alloc and free #include "dump.h" #include "decoder.h" #include "codec.h" #include "vlc.h" #include "codebooks.h" // References to the codebooks #include "debug.h" #include "color.h" // Color formats supported by image processing routines #include "image.h" #include "filter.h" #include "spatial.h" #include "temporal.h" //#include "logo40x5.h" #include "convert.h" #include "wavelet.h" #include "bitstream.h" #include "frame.h" #include "cpuid.h" #include "bayer.h" #include "metadata.h" #include "DemoasicFrames.h" //TODO: Change filename to lower case #include "swap.h" #include "draw.h" #include "RGB2YUV.h" #include "lutpath.h" #include "exception.h" extern void FastVignetteInplaceWP13(DECODER *decoder, 
int displayWidth, int width, int height, int y, float r1, float r2, float gain, int16_t *sptr, int resolution, int pixelsize); extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize); extern void FastSharpeningBlurVWP13(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); extern void FastSharpeningBlurVW13A(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); #ifdef SPI_LOADER #include "spi.h" #include "keyframes.h" #endif #ifndef DUMP #define DUMP (0 && _DUMP) #endif #define ERROR_TOLERANT 1 #if defined(_WIN32) && DEBUG #include <tchar.h> // For printing debug string in the console window #endif #define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform #define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform #if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch #if _DECODE_TRANSFORM == 0 #define _DECODE_TRANSFORM 1 #endif #endif #ifndef _FSMBUFFER #define _FSMBUFFER 0 #endif // Turn off saturation in this file #ifdef SATURATE #undef SATURATE #endif #define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x)) #define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x)) //#define SATURATE8S(x) SATURATE_8S(x) //#define SATURATE(x) (x) // Enable or disable function inlining #if 1 //DEBUG #define inline #else #define inline __forceinline #endif // Pixel size used for computing the compression ratio #define BITS_PER_PIXEL 8 // Default processor capabilities #define DEFAULT_FEATURES (_CPU_FEATURE_MMX ) #define DEMOSAIC_DELAYLINES 4 // Forward references void AllocDecoderGroup(DECODER *decoder); bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format); void EraseDecoderFrames(DECODER 
*decoder); TRANSFORM *AllocGroupTransform(GROUP *group, int channel); void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format); #if _DEBUG bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile); #else bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch); #endif bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels, uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info, int chroma_offset, int precision); extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index, uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading, uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes); //extern void ComputeCube(DECODER *decoder); extern bool NeedCube(DECODER *decoder); extern void LoadTweak(); //extern int g_topdown; //extern int g_bottomup; // Performance measurements #if _TIMING extern TIMER tk_decompress; // Timers extern TIMER tk_decoding; extern TIMER tk_convert; extern TIMER tk_inverse; extern COUNTER decode_byte_count; // Counters extern COUNTER sample_byte_count; extern COUNTER alloc_group_count; extern COUNTER alloc_transform_count; extern COUNTER alloc_buffer_count; extern COUNTER spatial_decoding_count; extern COUNTER temporal_decoding_count; extern COUNTER progressive_decode_count; #endif #if 0 // Table that maps from decoded format to pixel size static const int PixelSize[] = { 0, // DECODED_FORMAT_UNSUPPORTED 2, // DECODED_FORMAT_YUYV 2, // DECODED_FORMAT_UYVY 2, 
	// DECODED_FORMAT_420
	4,	// DECODED_FORMAT_RGB32
	3,	// DECODED_FORMAT_RGB24
	2,	// DECODED_FORMAT_RGB555
	2,	// DECODED_FORMAT_RGB565
#if 0
	2,	// DECODED_FORMAT_YUYV_INVERTED
	2,	// DECODED_FORMAT_UYVY_INVERTED
	2,	// DECODED_FORMAT_420_INVERTED
#endif
	4,	// DECODED_FORMAT_RGB32_INVERTED
	3,	// DECODED_FORMAT_RGB24_INVERTED
	2,	// DECODED_FORMAT_RGB555_INVERTED
	2,	// DECODED_FORMAT_RGB565_INVERTED
	3,	// DECODED_FORMAT_V210,
	4,	// DECODED_FORMAT_YU64,		custom 16 bits per channel (all data scaled up) YUYV format
	4,	// DECODED_FORMAT_YR16		rows of YUV with 16 bits per channel
};

#if _DEBUG
// Printable names for the decoded formats (debug builds only)
char *decoded_format_string[] =
{
	"Unsupported",
	"YUYV",
	"UYUV",
	"420",
	"RGB32",
	"RGB24",
	"RGB555",
	"RGB565",
#if 0
	"YUYV Inverted",
	"UYVY Inverted",
	"420 Inverted",
#endif
//#if BUILD_PROSPECT
	"RGB32 Inverted",
	"RGB24 Inverted",
	"RGB555 Inverted",
	"RGB565 Inverted",
	"V210"
//#endif
};
#endif

#else

// Bytes per pixel for each basic color format.
// NOTE(review): entry order must match the COLOR_FORMAT_* enum values in color.h;
// a zero entry means "size not representable as bytes per pixel" for that format.
static const int pixel_size_table[] =
{
	0,	// COLOR_FORMAT_UNKNOWN
	2,	// COLOR_FORMAT_UYVY
	2,	// COLOR_FORMAT_YUYV
	2,	// COLOR_FORMAT_YVYU
	0,	// COLOR_FORMAT_YV12
	0,	// COLOR_FORMAT_I420
	2,	// COLOR_FORMAT_RGB16
	3,	// COLOR_FORMAT_RGB24
	4,	// COLOR_FORMAT_RGB32
	0,
	3,	// COLOR_FORMAT_V210
	0,	// COLOR_FORMAT_RGB10
	4,	// COLOR_FORMAT_YU64
	4,	// COLOR_FORMAT_YR16
	4,	// COLOR_FORMAT_YUVA
};

// Number of entries in the pixel size table
static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);

// Return the size in bytes of one pixel in the specified color format.
// Small format codes are handled by table lookup; Avid, Bayer, and the
// deep RGB formats are handled by explicit range tests below.
// Returns 0 when the size is unknown or not meaningful for the format.
static int PixelSize(int format)
{
	int pixel_size = 0;

	// Mask off the other fields in the format descriptor
	// Use the lookup table to determine the pixel size (if possible)
	if (0 <= format && format < pixel_size_table_length)
	{
		pixel_size = pixel_size_table[format];
		//return pixel_size;
	}
	//TODO: Change the rest of this routine into one big switch statement

	// Is this an Avid format?
	else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
	{
		switch (format)
		{
		case COLOR_FORMAT_CbYCrY_8bit:
		case COLOR_FORMAT_CbYCrY_10bit_2_8:		// Only valid for the lower plane
			pixel_size = 1;
			break;

		case COLOR_FORMAT_CbYCrY_16bit:
		case COLOR_FORMAT_CbYCrY_16bit_2_14:
		case COLOR_FORMAT_CbYCrY_16bit_10_6:
			pixel_size = 2;
			break;

		default:
			assert(0);
			// Assume 16 bits per pixel if the format is unknown
			pixel_size = 2;
			break;
		}
	}
	// Is this a Bayer format?
	else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
	{
		// NOTE(review): assumes the Bayer format codes are 100-based so that
		// (format - 100) yields the per-component size, clamped to 2 bytes
		// -- confirm against the COLOR_FORMAT_BAYER definitions in color.h
		pixel_size = (format - 100);
		if(pixel_size > 2)
			pixel_size = 2;
	}
	else if (format == COLOR_FORMAT_RG48)
		pixel_size = 6;
	else if (format == COLOR_FORMAT_RG64)
		pixel_size = 8;
	else if (format == COLOR_FORMAT_B64A)
	{
		pixel_size = 8;
	}

	return pixel_size;
}
#endif

// Return the size in bytes of one pixel in the specified decoded format.
// Asserts (and returns 0) for formats whose rows are not an integral
// number of bytes per pixel (V210, 10-bit 2+8) and for unknown formats.
int DecodedPixelSize(DECODED_FORMAT format)
{
	int pixel_size = 0;

	// Compute the pixel size
	switch (format)
	{
	case DECODED_FORMAT_YUYV:
		pixel_size = 2;
		break;

	case DECODED_FORMAT_RGB32:
		pixel_size = 4;
		break;

	case DECODED_FORMAT_RG48:
		pixel_size = 6;
		break;

	case DECODED_FORMAT_CT_UCHAR:
		pixel_size = 2;
		break;

	case DECODED_FORMAT_CT_SHORT:
	case DECODED_FORMAT_CT_SHORT_2_14:
	case DECODED_FORMAT_CT_USHORT_10_6:
		pixel_size = 4;
		break;

	case DECODED_FORMAT_CT_10Bit_2_8:
	case DECODED_FORMAT_V210:
		// This routine should not be called to compute the pixel sizes for these formats
		assert(0);
		return 0;
		break;

	case DECODED_FORMAT_ROW16U:
		pixel_size = 4;
		break;

	default:
		assert(0);
		return 0;
		break;
	}

	return pixel_size;
}

#if 0
// Convert FOURCC code to a string
static void str4cc(char *string, uint32_t marker)
{
	char *p = (char *)&marker + 3;
	char *s = string;
	int i;

	for (i = 0; i < 4; i++)
		*(s++) = *(p--);
	*s = '\0';
}
#endif

// Compute the display aspect ratio (*w : *h) for the current frame.
// Scales the decoded dimensions back to the original frame size according
// to the decoded resolution, then either derives the ratio from the pixel
// aspect ratio (reducing the fraction) or guesses a ratio from common
// frame sizes when the stored picture aspect values cannot be trusted.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
	int origw,origh, guess = 0;
	origw = decoder->frame.width;
	origh = decoder->frame.height;

	// Undo the decode-time downscaling to recover the original frame size
	switch(decoder->frame.resolution)
	{
	case DECODED_RESOLUTION_FULL:
		break;
	case DECODED_RESOLUTION_HALF:
		origw *= 2;
		origh *= 2;
		break;
	case DECODED_RESOLUTION_QUARTER:
		origw *= 4;
		origh *= 4;
		break;
	case DECODED_RESOLUTION_LOWPASS_ONLY:
		origw *= 8;
		origh *= 8;
		break;
	case DECODED_RESOLUTION_FULL_DEBAYER:
		break;
	case DECODED_RESOLUTION_HALF_NODEBAYER:
		origw *= 2;
		origh *= 2;
		break;
	case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
		origw *= 4;
		origh *= 4;
		break;
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		//origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
		break;
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
		origw *= 2;
		break;
	case DECODED_RESOLUTION_HALF_VERTICAL:
		origh *= 2;
		break;
	}

	// Non-positive stored aspect values cannot be trusted
	if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
		guess = 1;

	// 16:9 may be a default rather than a real value -- treat it as a guess too
	if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
		guess = 1;

	if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
	{
		int j,den,num;
		decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
		decoder->codec.picture_aspect_y = den = origh;

		// Reduce the fraction num/den by dividing out every common factor
		for(j=2; j<num+den; j++)
		{
			while(num == (num/j)*j && den == (den/j)*j)
			{
				num /= j;
				den /= j;
			}
		}
		decoder->codec.picture_aspect_x = num;
		decoder->codec.picture_aspect_y = den;
		guess = 0;
	}

	if(guess)
	{
		if(origw > 720) //HD.
		{
			if(origh == 1080)
			{
				if(origw == 2048)
					*w=origw,*h=origh;
				else
					*w=16,*h=9; // assume 16x9
			}
			else if(origh == 720)
			{
				*w=16,*h=9; // assume 16x9
			}
			else
			{
				*w=origw,*h=origh; // assume square pixel.
			}
		}
		else
		{
			if(origh == 720)
			{
				*w=16,*h=9; // assume 16x9
			}
			else
			{
				*w=origw,*h=origh; // assume square pixel.
			}
		}
	}
	else
	{
		// Use the stored (or freshly reduced) picture aspect ratio
		*w=decoder->codec.picture_aspect_x;
		*h=decoder->codec.picture_aspect_y;
	}
}

// Return true if the resolution is one the decoder can produce
bool IsValidFrameResolution(int resolution)
{
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF:
	case DECODED_RESOLUTION_QUARTER:
	case DECODED_RESOLUTION_LOWPASS_ONLY:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		return true;

	default:
		return false;
	}
}

// Return true if this decoder can decode to quarter resolution
// (always enabled in this build)
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
	return true;
}

// Size in bytes of the DECODER structure (for callers that allocate it)
size_t DecoderSize()
{
	return sizeof(DECODER);
}

// Initialize a decoder instance: clear all state (preserving any thread
// control parameters already set by the caller), install the codebooks
// from the supplied codesets (or the default codeset cs9 when cs is NULL),
// and reset the codec state and scratch buffer descriptor.
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
	}
#endif
	{
		//TODO: Clear the decoder before setting the CPU limit and affinity
		int i;
		//int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;

		// Save key params so they survive the memset below
		Thread_cntrl saved_params = decoder->thread_cntrl;

		// Clear everything
		memset(decoder, 0, sizeof(DECODER));

		// Restore key params
		if(saved_params.set_thread_params == 1) // used by the DShow Interface
		{
			decoder->thread_cntrl = saved_params;
		}

#if _TIMING
		InitTiming();
#endif
		// Set the file for status information during decoding
		decoder->logfile = logfile;

		// Initialize the decoding error to no error
		decoder->error = CODEC_ERROR_OKAY;

		// Most recent marker found during decoding
		decoder->marker = 0;

		// Count of frames decoded
		decoder->frame_count = 0;

		// Set the codebooks that will be used for decoding
		if (cs != NULL)
		{
			// Use the codeset provided in the call
			for(i=0; i<CODEC_NUM_CODESETS; i++)
			{
				// Codebook for decoding highpass coefficients
				decoder->magsbook[i] = cs[i].magsbook;

				// Codebook for decoding runs of coefficients
				decoder->runsbook[i] = cs[i].runsbook;

				// Lookup table for fast codebook search
				decoder->fastbook[i] = cs[i].fastbook;
			}
		}
		else
		{
			// Use the default codeset (only slot zero is filled in)
			decoder->magsbook[0] = cs9.magsbook;
			decoder->runsbook[0] = cs9.runsbook;
			decoder->fastbook[0] = cs9.fastbook;
		}

		// Initialize the codec state
		InitCodecState(&decoder->codec);

		// No scratch memory attached yet
		InitScratchBuffer(&decoder->scratch, NULL, 0);

#if _DUMP
		// Initialize the descriptor for controlling debug output
		decoder->dump.enabled = false;
		decoder->dump.channel_mask = 0;
		decoder->dump.wavelet_mask = 0;
		memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
		memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
	}

	//REDTEST
	decoder->frm = 0;
	decoder->run = 1;

#if _ALLOCATOR
	decoder->allocator = NULL;
#endif

	// Mark the decoder as initialized so ClearDecoder knows there is state to free
	decoder->initialized = 1; //DAN20060912
}

// Install the license key into the decoder, but only if no license
// has been set yet (an all-zero key means "unlicensed").
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
	if (decoder && licensekey)
	{
		const unsigned char unlicensed[16] = {0};
		//memset(unlicensed, 0, sizeof(unlicensed));

		// Has the license been set?
		if (memcmp(decoder->licensekey, unlicensed, sizeof(decoder->licensekey)) == 0)
		{
			// Copy the license into the decoder
			memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
		}
	}
}

// Free data allocated within the decoder.
// Releases every buffer, transform, codebook reference, and worker-thread
// resource owned by the decoder; safe to call on a decoder that was never
// used because it returns immediately unless InitDecoder ran first.
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif
	// Free the transforms allocated in the decoder
	int i;

	if(decoder->initialized == 0)
		return; // nothing to free //DAN20060912

#if _GRAPHICS
	DrawClose(decoder);
#endif

	// Free the metadata databases
	for(i=0; i<=METADATA_PRIORITY_MAX; i++)
	{
		if(decoder->DataBases[i])
		{
#if _ALLOCATOR
			Free(decoder->allocator, decoder->DataBases[i]);
#else
			MEMORY_FREE(decoder->DataBases[i]);
#endif
			decoder->DataBases[i] = NULL;
			decoder->DataBasesSize[i] = 0;
			decoder->DataBasesAllocSize[i] = 0;
		}
	}

	if(decoder->sqrttable)
	{
#if _ALLOCATOR
		Free(decoder->allocator, decoder->sqrttable);
#else
		MEMORY_FREE(decoder->sqrttable);
#endif
		decoder->sqrttable = NULL;
	}

	// Free the per-channel wavelet transforms
	for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
	{
#if _ALLOCATOR
		FreeTransform(allocator, decoder->transform[i]);
#else
		FreeTransform(decoder->transform[i]);
#endif
		decoder->transform[i] = NULL;
	}

	if(decoder->aligned_sample_buffer)
	{
#if _ALLOCATOR
		FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
		MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
		decoder->aligned_sample_buffer = NULL;
		decoder->aligned_sample_buffer_size = 0;
	}

	if(decoder->tools)
	{
#if _ALLOCATOR
		Free(decoder->allocator, decoder->tools);
#else
		MEMORY_FREE(decoder->tools);
#endif
		decoder->tools = NULL;
	}

	// Free the buffer allocated for decoding
	if (decoder->buffer != NULL)
	{
#if DEBUG_BUFFER_USAGE
		// Report how much of the scratch buffer was actually touched
		int i;
		char *ptr = (char *)decoder->buffer;
		FILE *fp = fopen("C:/free.txt", "a");
		fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
		i = decoder->buffer_size-1;
		while(ptr[i] == 1)
			i--;
		fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
		fclose(fp);
#endif
#if _ALLOCATOR
		FreeAligned(allocator, decoder->buffer);
#else
		MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
		decoder->buffer = NULL;
		decoder->buffer_size = 0;

		// Clear the fields in the scratch buffer descriptor
		memset(&decoder->scratch, 0, sizeof(SCRATCH));

		// Eventually the buffer and buffer size fields will be obsolete
	}

	// Free the per-thread debayer/color-formatting buffers
	for(i=0;i<_MAX_CPUS;i++)
	{
		if(decoder->threads_buffer[i])
		{
#if _ALLOCATOR
			FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
			MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
			decoder->threads_buffer[i] = NULL;
		}
	}
	decoder->threads_buffer_size = 0;

	// Do not attempt to free the codebooks since the
	// codebook pointers are references to static tables

	// Can free some of the data structures allocated by the decoder
	FreeCodebooks(decoder);

#if _INTERLACED_WORKER_THREADS
	if(decoder->interlaced_worker.lock_init) // threads started
	{
		int i;

		// Signal this thread to stop
		SetEvent(decoder->interlaced_worker.stop_event);

		// Free all handles used by the worker threads
		for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
		{
			WaitForSingleObject(decoder->interlaced_worker.handle[i],
				INFINITE); //JY20080307
			CloseHandle(decoder->interlaced_worker.handle[i]);
			CloseHandle(decoder->interlaced_worker.start_event[i]);
			CloseHandle(decoder->interlaced_worker.done_event[i]);
		}
		CloseHandle(decoder->interlaced_worker.row_semaphore);
		CloseHandle(decoder->interlaced_worker.stop_event);

		// Clear the handles so a later restart does not reuse stale values
		for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
		{
			decoder->interlaced_worker.handle[i] = 0;
			decoder->interlaced_worker.start_event[i] = 0;
			decoder->interlaced_worker.done_event[i] = 0;
		}
		decoder->interlaced_worker.row_semaphore = 0;
		decoder->interlaced_worker.stop_event = 0;
	}

	// Free the critical section used by the worker threads
	DeleteCriticalSection(&decoder->interlaced_worker.lock);
	decoder->interlaced_worker.lock_init = 0;
#endif

#if _THREADED
	// Tear down the thread pools (a nonzero thread_count marks a started pool)
	if(decoder->entropy_worker_new.pool.thread_count)
	{
		ThreadPoolDelete(&decoder->entropy_worker_new.pool);
		DeleteLock(&decoder->entropy_worker_new.lock);
	}
	if(decoder->worker_thread.pool.thread_count)
	{
		ThreadPoolDelete(&decoder->worker_thread.pool);
		DeleteLock(&decoder->worker_thread.lock);
	}
	if(decoder->draw_thread.pool.thread_count)
	{
		ThreadPoolDelete(&decoder->draw_thread.pool);
		DeleteLock(&decoder->draw_thread.lock);
	}
	/*
	if(decoder->qt_convert_worker.pool.thread_count)
	{
		ThreadPoolDelete(&decoder->qt_convert_worker.pool);
		DeleteLock(&decoder->qt_convert_worker.lock);
	}
	if(decoder->qt_scale_worker.pool.thread_count)
	{
		ThreadPoolDelete(&decoder->qt_scale_worker.pool);
		DeleteLock(&decoder->qt_scale_worker.lock);
	}
	*/
	// Recursively clear and free the parallel (secondary) decoder
	if(decoder->parallelDecoder)
	{
		if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
		{
			ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
			DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
			decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
		}

		ClearDecoder(decoder->parallelDecoder);

#if _ALLOCATOR
		Free(decoder->allocator, decoder->parallelDecoder);
#else
		MEMORY_FREE(decoder->parallelDecoder);
#endif
		decoder->parallelDecoder = NULL;
	}
#endif

	//MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
	// Allocator build: release the image-processing scratch buffers and LUTs
	if(decoder->RGBFilterBuffer16)
	{
		FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
		decoder->RGBFilterBuffer16 = 0;
		decoder->RGBFilterBufferSize = 0;
	}
	if(decoder->RawBayer16)
	{
		FreeAligned(decoder->allocator, decoder->RawBayer16);
		decoder->RawBayer16 = 0;
		decoder->RawBayerSize = 0;
	}
	if(decoder->StereoBuffer)
	{
		FreeAligned(decoder->allocator, decoder->StereoBuffer);
		decoder->StereoBuffer = 0;
		decoder->StereoBufferSize = 0;
	}
	if(decoder->RawCube)
	{
		FreeAligned(decoder->allocator, decoder->RawCube);
		decoder->RawCube = 0;
	}
	if(decoder->Curve2Linear)
	{
		FreeAligned(decoder->allocator, decoder->Curve2Linear);
		decoder->Curve2Linear = 0;
	}
	if(decoder->Linear2CurveRed)
	{
		FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
		decoder->Linear2CurveRed = NULL;
	}
	if(decoder->Linear2CurveGrn)
	{
		FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
		decoder->Linear2CurveGrn = NULL;
	}
	if(decoder->Linear2CurveBlu)
	{
		FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
		decoder->Linear2CurveBlu = NULL;
	}
	if(decoder->BYR4LinearRestore)
	{
		FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
		decoder->BYR4LinearRestore = NULL;
	}
	if(decoder->GammaContrastRed)
	{
		FreeAligned(decoder->allocator, decoder->GammaContrastRed);
		decoder->GammaContrastRed = NULL;
	}
	if(decoder->GammaContrastGrn)
	{
		FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
		decoder->GammaContrastGrn = NULL;
	}
	if(decoder->GammaContrastBlu)
	{
		FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
		decoder->GammaContrastBlu = NULL;
	}

	//3d LUT
	{
		if(decoder->LUTcache)
			Free(decoder->allocator, decoder->LUTcache);
		decoder->LUTcache = NULL;
		decoder->LUTcacheCRC = 0;
	}

#if WARPSTUFF
	// Release the lens-correction mesh and buffer and reset the cached
	// lens parameters so a later decode rebuilds the warp from scratch
	{
		if (decoder->lens_correct_buffer)
#if _ALLOCATOR
			Free(decoder->allocator, decoder->lens_correct_buffer);
#else
			MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
		if (decoder->mesh)
			geomesh_destroy(decoder->mesh);
		decoder->lastLensOffsetX = 0;
		decoder->lastLensOffsetY = 0;
		decoder->lastLensOffsetZ = 0;
		decoder->lastLensOffsetR = 0;
		decoder->lastLensZoom = 0;
		decoder->lastLensFishFOV = 0;
		decoder->lastLensGoPro = 0;
		decoder->lastLensSphere = 0;
		decoder->lastLensFill = 0;
		decoder->lastLensStyleSel = 0;
		memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
		memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
		decoder->mesh = NULL;
		decoder->lens_correct_buffer = NULL;
	}
#endif

	if(decoder->overrideData)
	{
		Free(decoder->allocator, decoder->overrideData);
		decoder->overrideData = NULL;
		decoder->overrideSize = 0;
	}

	// Free the metadata chunk buffers
	for(i=0; i<64; i++)
	{
		if(decoder->mdc[i])
			Free(decoder->allocator, decoder->mdc[i]);
		decoder->mdc[i] = NULL;
		decoder->mdc_size[i] = 0;
	}

#else
	// Non-allocator build: same teardown using the raw memory macros
	if(decoder->RGBFilterBuffer16)
	{
		MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
		decoder->RGBFilterBuffer16 = NULL;
	}
	if(decoder->RawBayer16)
	{
		MEMORY_ALIGNED_FREE(decoder->RawBayer16);
		decoder->RawBayer16 = NULL;
	}
	if(decoder->StereoBuffer)
	{
		MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
		decoder->StereoBuffer = NULL;
		decoder->StereoBufferSize = 0;
	}
	if(decoder->RawCube)
	{
		MEMORY_ALIGNED_FREE(decoder->RawCube);
		decoder->RawCube = NULL;
	}
	if(decoder->Curve2Linear)
	{
		MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
		decoder->Curve2Linear = NULL;
	}
	if(decoder->BYR4LinearRestore)
	{
		MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
		decoder->BYR4LinearRestore = NULL;
	}
	if(decoder->Linear2CurveRed)
	{
		MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
		decoder->Linear2CurveRed = NULL;
	}
	if(decoder->Linear2CurveGrn)
	{
		MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
		decoder->Linear2CurveGrn = NULL;
	}
	if(decoder->Linear2CurveBlu)
	{
		MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
		decoder->Linear2CurveBlu = NULL;
	}
	if(decoder->GammaContrastRed)
	{
		MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
		decoder->GammaContrastRed = NULL;
	}
	if(decoder->GammaContrastGrn)
	{
		MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
		decoder->GammaContrastGrn = NULL;
	}
	if(decoder->GammaContrastBlu)
	{
MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu); decoder->GammaContrastBlu = NULL; } //3d LUT { if(decoder->LUTcache) MEMORY_FREE(decoder->LUTcache); decoder->LUTcache = NULL; decoder->LUTcacheCRC = 0; } #if WARPSTUFF { if (decoder->lens_correct_buffer) #if _ALLOCATOR Free(decoder->allocator, decoder->lens_correct_buffer); #else MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer); #endif if (decoder->mesh) geomesh_destroy(mesh); decoder->mesh = NULL; decoder->lens_correct_buffer = NULL; decoder->lastLensOffsetX = 0; decoder->lastLensOffsetY = 0; decoder->lastLensOffsetZ = 0; decoder->lastLensOffsetR = 0; decoder->lastLensZoom = 0; decoder->lastLensFishFOV = 0; decoder->lastLlensGoPro = 0; decoder->lastLlensSphere = 0; decoder->lastLlensFill = 0; decoder->lastLlensStyleSel = 0; memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC)); memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST)); } #endif if(decoder->overrideData) { MEMORY_FREE(decoder->overrideData); decoder->overrideData = NULL; decoder->overrideSize = 0; } for(i=0; i<64; i++) { if(decoder->mdc[i]) MEMORY_FREE(decoder->mdc[i]); decoder->mdc[i] = NULL; decoder->mdc_size[i] = 0; } #endif #ifdef SPI_LOADER SPIReleaseAll(decoder); //KeyframesReleaseAll(decoder); #endif decoder->initialized = 0;// cleared } void ExitDecoder(DECODER *decoder) { // Let the caller keep the logfile open or choose to close it //if (logfile) fclose(logfile); // Free data allocated within the decoder ClearDecoder(decoder); } // Allocate the data structures for decoding a group void AllocDecoderGroup(DECODER *decoder) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels;//DAN07022004 int channel; assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004 for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)//DAN07022004 { TRANSFORM *transform = decoder->transform[channel]; // Need 
to allocate a transform data structure? if (transform == NULL) { #if _ALLOCATOR transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM)); #else transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM)); #endif assert(transform != NULL); if (transform == NULL) { decoder->error = CODEC_ERROR_TRANSFORM_MEMORY; return; } memset(transform, 0, sizeof(TRANSFORM)); decoder->transform[channel] = transform; #if _TIMING alloc_transform_count++; #endif } } } // Allocate the buffer used for intermediate results during decoding bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format) { int cpus; size_t size; size_t row_size; char *buffer; #if 0 // Allocate a buffer large enough for six rows of cache lines size = width * sizeof(PIXEL); size = ALIGN(size, _CACHE_LINE_SIZE); size = 2 * TRANSFORM_MAX_CHANNELS * size; #else // Allocate a buffer large enough for nine rows of cache lines size = width * sizeof(PIXEL) * 4; size = ALIGN(size, _CACHE_LINE_SIZE); size = 3 * TRANSFORM_MAX_CHANNELS * size; #endif switch (format) { case DECODED_FORMAT_V210: case DECODED_FORMAT_YU64: // Increase the buffer size for decoding to the V210 format row_size = 4 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 4 * 2 * row_size; break; case DECODED_FORMAT_YR16: case DECODED_FORMAT_CbYCrY_10bit_2_8: case DECODED_FORMAT_CbYCrY_16bit_2_14: case DECODED_FORMAT_CbYCrY_16bit_10_6: // Increase the buffer size for decoding to the YUV16 format row_size = 4 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 8 * 2 * row_size; break; case DECODED_FORMAT_RG48: case DECODED_FORMAT_WP13: // Increase the buffer size for decoding to the YUV16 format row_size = 6 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 12 * 2 * row_size; break; case DECODED_FORMAT_RG64: // Increase the buffer size for decoding to the YUV16 format row_size = 8 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size 
	+= 16 * 2 * row_size;
		break;

	case DECODED_FORMAT_BYR3:
		// Increase the buffer size for decoding to the BYR3 format
		row_size = 2 * width * sizeof(PIXEL);
		row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
		size += 4 * 2 * row_size;
		break;

	case DECODED_FORMAT_BYR4:
		// Increase the buffer size for decoding to the BYR4 format
		row_size = 2 * width * sizeof(PIXEL);
		row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
		size += 4 * 2 * row_size;
		break;

	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_W13A:
		// Increase the buffer size for decoding to the B64A format
		row_size = 8 * width * sizeof(PIXEL);
		row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
		size += 16 * 2 * row_size;
		break;

	default:
		// Increase the buffer size for YUV to RGB conversion
		row_size = 3 * width * sizeof(PIXEL);
		row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
		size += 2 * 2 * row_size;
		break;
	}

	// The upper 16 bits of the capabilities word hold the CPU count;
	// scale the scratch buffer up for many-core machines
	cpus = decoder->thread_cntrl.capabilities >> 16;
	if(cpus > 4)
		size *= 4;
	if(cpus > 16) //DAN20120803 -- 4444 clips
		size *= 2;

	// Has a buffer already been allocated?
	if (decoder->buffer != NULL)
	{
		// Is the buffer large enough?
		if (decoder->buffer_size < size)
		{
			// Free the previous buffer
#if _ALLOCATOR
			FreeAligned(decoder->allocator, decoder->buffer);
#else
			MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
			decoder->buffer = NULL;
			decoder->buffer_size = 0;
		}
		else
		{
			// The existing buffer is already big enough
			return true;
		}
	}

	buffer = decoder->buffer;
	if(buffer == NULL)
	{
		// Allocate the decoding buffer
#if _ALLOCATOR
		buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
		buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
		if(buffer == NULL)
		{
			return false;
		}
	}

#if DEBUG_BUFFER_USAGE
	// Fill with a sentinel so ClearDecoder can report how much was used
	memset(buffer, 1, size);
#endif

	// Save the buffer and its size in the decoder
	decoder->buffer = buffer;
	decoder->buffer_size = size;

	// Initialize the scratch space descriptor
	InitScratchBuffer(&decoder->scratch, buffer, size);

	// Allocate a buffer for each debayer/color formatting thread
	{
		int i;
		size = (width+16)*3*2*4*2*4; // sixteen lines

		if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
			size *= 1 + ((height+(width/2))/width);

		// Existing per-thread buffers that are too small must all be released
		if (decoder->threads_buffer_size < size)
		{
			for(i=0;i<_MAX_CPUS;i++)
			{
				if(decoder->threads_buffer[i])
				{
#if _ALLOCATOR
					FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
					MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
					decoder->threads_buffer[i] = NULL;
				}
			}
			decoder->threads_buffer_size = 0;
		}

		// Allocate one buffer per CPU that is not already allocated
		for(i=0;i<cpus;i++)
		{
			if(decoder->threads_buffer[i] == NULL)
			{
#if _ALLOCATOR
				decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
				decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
				if(decoder->threads_buffer[i] == NULL)
				{
					return false;
				}
			}
		}
		decoder->threads_buffer_size = size;
	}

	// Eventually the scratch space descriptor will replace the buffer and buffer_size fields
	return true;
}

// Reallocate the decoding buffer for new dimensions or output format
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
	// Check that the dimensions are valid
	assert(width > 0);
	assert(height > 0);

	// Just call the allocation routine
	return AllocDecoderBuffer(decoder, width, height, format);
}

// Clear the per-band valid/started flags in every allocated wavelet
// so the next decode starts from a clean slate
void ClearTransformFlags(DECODER *decoder)
{
	TRANSFORM **transform_array = decoder->transform;
	int channel;

	for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
	{
		TRANSFORM *transform = transform_array[channel];
		int index;

		// Channels are allocated contiguously, so the first empty slot ends the scan
		if (transform == NULL) break;

		for (index = 0; index < TRANSFORM_MAX_WAVELETS; index++)
		{
			IMAGE *wavelet = transform->wavelet[index];
			if (wavelet != NULL)
			{
				wavelet->band_valid_flags = 0;
				wavelet->band_started_flags = 0;
			}
		}
	}
}

// Initialize the tables for decoding the wavelet transforms.
// Copies the first num_subbands entries of each mapping table into the
// decoder after zeroing the destinations.
void InitWaveletDecoding(DECODER *decoder,
						 int subband_wavelet_index[],
						 int subband_band_index[],
						 int num_subbands)
{
	size_t subband_table_size = num_subbands * sizeof(int);

	// Table that maps each subband to its wavelet
	memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
	memcpy(decoder->subband_wavelet_index, subband_wavelet_index, subband_table_size);

	// Table that maps each subband to a band within its wavelet
	memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
	memcpy(decoder->subband_band_index, subband_band_index, subband_table_size);
}

#if 0
static bool IsValidFormat(int format)
{
	bool valid_format = true;

	//TODO: Change this routine into a switch statement
	if(format == COLOR_FORMAT_BYR5) return true; // can decode to BYR5
	if(format == COLOR_FORMAT_BYR4) return true; // can decode to BYR4
	if(format == COLOR_FORMAT_BYR3) return true; // can decode to BYR3
	if(format == COLOR_FORMAT_BYR2) return true; // can decode to BYR2
	if(format == COLOR_FORMAT_RG48) return true; // can decode to RGB48
	if(format == COLOR_FORMAT_RG64) return true; // can decode to RGBA64

	if (format == COLOR_FORMAT_B64A)
	{
		return true; // Can decode to B64A
	}

	if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT))
	{
		valid_format = false;
	}

	return valid_format;
}
#endif

#if _INTERLACED_WORKER_THREADS
// Start the pool of worker threads for the last (interlaced) wavelet:
// creates the start/done events, the row semaphore, the stop event, and
// the thread handles, guarded by lock_init so it runs only once
void StartInterlaceWorkerThreads(DECODER *decoder)
{
	int i;

	if(decoder->interlaced_worker.lock_init == 0)
	{
		// Create events for starting the worker threads
		for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
		{
			decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
		}

		// Create a semaphore to signal the worker threads to process rows
		decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);

		// Create an event for each worker thread to signal that it has finished
		for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
		{
			decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
		}

		// Create an event for forcing the worker threads to terminate
		decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);

		// Zero the count of worker threads that are active
		decoder->interlaced_worker.thread_count = 0;

		// Initialize the lock for controlling access to the worker thread data
		InitializeCriticalSection(&decoder->interlaced_worker.lock);
		decoder->interlaced_worker.lock_init = 1;

		for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
		{
			decoder->interlaced_worker.id[i] = 0;
			decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
			assert(decoder->interlaced_worker.handle[i] != NULL);
		}
	}
}
#endif

#if 0
int TestException(int x)
{
	static volatile int y1 = 100;
	volatile int x1 = x;
	return y1 / x1;
}
#endif

// Process device driver request to initialize the decoder.
// Copies the compile-time codesets, installs the exception handler on
// Windows, clears the decoder, and prepares codebooks, FSMs, buffers,
// and worker threads for decoding at the requested resolution.
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format,
				int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format,
				int resolution, FILE *logfile)
#endif
{
	CODESET codesets[CODEC_NUM_CODESETS];
	int i;
	int cpus;
	//int x = 0;

	// Copy the compile-time codesets into a local array
#if CODEC_NUM_CODESETS == 3
	memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
	memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
	memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
	memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
	memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
	memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

#ifdef _WIN32
	// Set the handler for system exceptions
	SetDefaultExceptionHandler();
#endif

	//TestException(x);

	// Clear all decoder fields except the logfile and set the codebooks for decoding
	InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
	decoder->allocator = allocator;
#endif

	if(decoder->thread_cntrl.capabilities == 0)
	{
		// Determine the processor capabilities
		SetDecoderCapabilities(decoder);
	}

	// The CPU count is packed into the upper 16 bits of the capabilities word
	cpus = decoder->thread_cntrl.capabilities >> 16;
	assert(cpus > 0 && cpus <= _MAX_CPUS);

	// Decode to half resolution?
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		// Reduce the frame size by half in each dimension
		width = width/2;
		height = height/2;
	}
	else if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Reduce the frame size by one fourth in each dimension
		width = width/4;
		height = height/4;
	}

	// Initialize the codebooks
#if _ALLOCATOR
	if (!InitCodebooks(decoder->allocator, codesets)) {
		//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
		// The subroutine has already set the error code
		return false;
	}
#else
	if (!InitCodebooks(codesets)) {
		//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
		// The subroutine has already set the error code
		return false;
	}
#endif

	// Initize the FSM
	InitDecoderFSM(decoder, &codesets[0]);

	// Check the frame dimensions and format
	//assert(width > 0);
	//assert(height > 0);
//	assert(IsValidFormat(format));

#if _THREADED_DECODER
	// Create a semaphore to signal the transform thread to begin processing

	// Initialize the transform queue
	decoder->transform_queue.started = 0;
	decoder->transform_queue.num_entries = 0;
	decoder->transform_queue.next_entry = 0;
	decoder->transform_queue.free_entry = 0;
	memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
	StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if
!_DELAY_THREAD_START	//start threads now if not _DELAY_THREAD_START
	if(cpus > 1)
	{
		// Cap the entropy-decode pool at four threads
		int threads = cpus;
		if(threads > 4) threads = 4;

		CreateLock(&decoder->entropy_worker_new.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->entropy_worker_new.pool,
						 threads,
						 EntropyWorkerThreadProc,
						 decoder);
	}

	// Initialize the lock that controls access to the generic worker thread data
	CreateLock(&decoder->worker_thread.lock);

	// Initialize the pool of transform worker threads
	ThreadPoolCreate(&decoder->worker_thread.pool,
					 cpus,
					 WorkerThreadProc,
					 decoder);
#endif
#endif

	// Set the frame dimensions and format
	SetDecoderFormat(decoder, width, height, format, resolution);

	// Allocate the data structure for decoding the samples
	AllocDecoderGroup(decoder);

	// Note that this code assumes that the samples to decode are groups
	// as opposed to isolated frames which are not supported in this code

	// Allocate a buffer for storing intermediate results during decoding
	if (!AllocDecoderBuffer(decoder, width, height, format)) {
		return false;
	}

	// Should check that the finite state machine tables were initialized
	assert(decoder->fsm[0].table.flags < 0);

	// Initialize the finite state machine for this decoder
	for(i=0; i<CODEC_NUM_CODESETS; i++)
	{
		InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
		// Scale the values in the finite state machine entries for companding
		ScaleFSM(&decoder->fsm[i].table);
#endif
	}

	// Indicate that the decoder has been initialized
	decoder->state = DECODER_STATE_INITIALIZED;

#if (1 && DUMP)
	// Write the wavelet bands as images
	SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
	SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
	SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
//	SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
	SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);

	// Set this flag to enable output
	decoder->dump.enabled = true;
#endif

#if _TIMING
	// Initialize the
global timers and counters
	InitTiming();
#endif

	//DAN20160203 Fix for a memory leak in InitCookbooks
	// Release the per-codeset lookup tables allocated during codebook init;
	// they are no longer needed once the decoder FSM tables are built.
	for (i = 0; i < CODEC_NUM_CODESETS; i++)
	{
#if _ALLOCATOR
		Free(allocator, codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		Free(allocator, codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		Free(allocator, codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#else
		MEMORY_FREE(codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		MEMORY_FREE(codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		MEMORY_FREE(codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#endif
	}

	// The decoder has been initialized successfully
	return true;
}

// Late initialization of the entropy-decode thread pool, honoring any
// user-imposed CPU limit carried in the decoder's CFHD metadata
void DecodeEntropyInit(DECODER *decoder)
{
	int cpus = 1;

	if(decoder->thread_cntrl.capabilities == 0)
	{
		// Determine the processor capabilities
		SetDecoderCapabilities(decoder);
	}

	// The CPU count is packed into the upper 16 bits of the capabilities word
	cpus = decoder->thread_cntrl.capabilities >> 16;

	// Clamp the thread count to the configured CPU limit (if any) and write
	// the clamped count back into the capabilities word
	if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
	{
		cpus = decoder->cfhddata.cpu_limit;
		decoder->thread_cntrl.limit = cpus;
		decoder->thread_cntrl.set_thread_params = 1;

		decoder->thread_cntrl.capabilities &= 0xffff;
		decoder->thread_cntrl.capabilities |= cpus<<16;
	}

	assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START	//start threads now if not _DELAY_THREAD_START
	// Create the pool only once (thread_count == 0 means not yet created)
	if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
	{
		// Cap the entropy-decode pool at four threads
		int threads = cpus;
		if(threads > 4) threads = 4;

		CreateLock(&decoder->entropy_worker_new.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->entropy_worker_new.pool,
						 threads,
						 EntropyWorkerThreadProc,
						 decoder);
	}
#endif
#endif
}

// Replace the decoder's metadata override buffer with a copy of the supplied
// data.  Passing overrideSize == 0 clears the override and also clears the
// cached metadata databases from the override priority upward.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
	// Release any previously installed override data
	if(decoder->overrideData)
	{
#if _ALLOCATOR
		Free(decoder->allocator, decoder->overrideData);
#else
		MEMORY_FREE(decoder->overrideData);
#endif
		decoder->overrideData = NULL;
		decoder->overrideSize = 0;
	}

	if(overrideSize)
	{
#if _ALLOCATOR
		decoder->overrideData =
Alloc(decoder->allocator, overrideSize);
#else
		decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
		// Only copy and record the size if the allocation succeeded
		if(decoder->overrideData)
		{
			memcpy(decoder->overrideData, overrideData, overrideSize);
			decoder->overrideSize = overrideSize;
		}
	}
	else
	{
		int i;
		for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++)	//This was 0 to max but that cause right eye primary corrections(side-by-side) mode to flicker.
			// This database cleariing was added but I don't know why.
		{
			if(decoder->DataBases[i])
			{
#if _ALLOCATOR
				Free(decoder->allocator, decoder->DataBases[i]);
#else
				MEMORY_FREE(decoder->DataBases[i]);
#endif
				decoder->DataBases[i] = NULL;
				decoder->DataBasesSize[i] = 0;
				decoder->DataBasesAllocSize[i] = 0;
			}
		}
	}
	return true;
}

// Return the transform for the specified channel in the group, allocating
// (and zeroing) it on first use.  Returns NULL if the channel number is out
// of range or the allocation fails.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
	//TODO:ALLOC Change this routine to take an allocator as the first argument
	ALLOCATOR *allocator = NULL;
#endif
	TRANSFORM *transform;

	// Channel zero is a special case because it may mean
	// that the group header has not been decoded yet
	if (channel != 0)
	{
		// Make sure that the channel number is in range
		assert(0 <= channel && channel < group->header.num_channels);
		if (!(0 <= channel && channel < group->header.num_channels))
			return NULL;
	}

	transform = group->transform[channel];

	// Need to allocate a transform data structure?
if (transform == NULL) { #if _ALLOCATOR transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM)); #else transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM)); #endif assert(transform != NULL); if (transform == NULL) return NULL; memset(transform, 0, sizeof(TRANSFORM)); group->transform[channel] = transform; #if _TIMING alloc_transform_count++; #endif } return transform; } //extern FILE *logfile; void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format) { size_t size = height * pitch; union { uint8_t byte[4]; uint32_t word; } output; switch (format) { case DECODED_FORMAT_YUYV: output.byte[0] = COLOR_LUMA_BLACK; output.byte[1] = COLOR_CHROMA_ZERO; output.byte[2] = COLOR_LUMA_BLACK; output.byte[3] = COLOR_CHROMA_ZERO; break; default: //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format); //assert(0); output.word = 0; break; } memset(buffer, output.word, size); } // Decode the coefficients in a subband bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband); // Decode the coefficients in a lowpass band bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet); // Decode the coefficients in a highpass band bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading); // Decode an empty band bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band); bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); // Decode a sample channel header bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input); // Apply the inverse horizontal-temporal transform to reconstruct the output frame void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch); #if 0 // Reconstruct the frame to quarter resolution 
at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
							 uint8_t *frame1, uint8_t *frame2, int output_pitch,
							 FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
							 int frame_index, uint8_t *output, int output_pitch,
							 FRAME_INFO *info, const SCRATCH *scratch, int precision);
#endif

// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
							  uint8_t *output, int output_pitch,
							  FRAME_INFO *info, int precision);

// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
								 uint8_t *output, int output_pitch,
								 FRAME_INFO *info, int precision);

// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame,
uint8_t *output, int pitch);

CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);

// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// Return true if the rest of the channel does not have to be decoded
// (every subband needed for the requested resolution has been decoded, or the
// channel is not needed for the requested output format)
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int transform_type = transform->type;

	// Can the rest of the channel be skipped?
	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			// Bayer-encoded sources never allow an early skip here
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_HALF) == DECODED_SUBBAND_MASK_HALF);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_QUARTER) == DECODED_SUBBAND_MASK_QUARTER);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			// Only the lowpass subband (bit zero) is required
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}
	else
	{
		// Spatial transforms use fixed subband masks for half/quarter resolution
		const uint32_t decoded_subband_mask_half = 0x7F;
		const uint32_t decoded_subband_mask_quarter = 0x0F;
		assert(transform_type == TRANSFORM_TYPE_SPATIAL);

		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			if(decoder->codec.encoded_format !=
ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_half) == decoded_subband_mask_half);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_quarter) == decoded_subband_mask_quarter);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			// Only the lowpass subband (bit zero) is required
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}

	// Cannot skip the rest of the channel
	return false;
}

#if 0
// Dead code: per-subband skip test for quarter-resolution decoding
static bool CanSkipSubband(DECODER *decoder, int subband)
{
	// Bitmask indicates which subbands must be decoded for quarter resolution
	static uint32_t quarter_resolution_mask = 0x008F;

	// Convert the subband number into a bitmask (could use a lookup table)
	uint32_t subband_mask = SUBBAND_MASK(subband);

	// Select the resolution of the fully decoded frames
	int resolution = decoder->frame.resolution;

	switch (resolution)
	{
	case DECODED_RESOLUTION_QUARTER:
		//if (4 <= subband && subband <= 6)
		if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
		{
			if ((subband_mask & quarter_resolution_mask) == 0)
			{
				return true;
			}
		}
		break;

	default:
		// Assume that the subband must be decoded
		break;
	}

	return false;
}
#endif

// Return true if the wavelet exists and all bands are valid
static bool AllBandsValid(IMAGE *wavelet)
{
	return (wavelet != NULL && BANDS_ALL_VALID(wavelet));
}

#if DEBUG
// Return true if every band of the indicated wavelet is valid in every channel
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	// Validate the argument ranges before indexing the arrays
	if (!(1 <= num_channels && num_channels <= TRANSFORM_MAX_CHANNELS))
	{
		assert(0);
		return false;
	}

	if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES))
	{
		assert(0);
		return false;
	}

	for (channel = 0; channel < num_channels;
channel++)
	{
		IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
		if (!AllBandsValid(wavelet))
		{
			return false;
		}
	}

	// All wavelet bands in all channels are valid
	return true;
}

// Return true if the lowpass band of the indicated wavelet is valid in every channel
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	if (!(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS))
	{
		return false;
	}

	if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES))
	{
		return false;
	}

	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
		// Band zero is the lowpass band
		if (!(wavelet != NULL && wavelet->band_valid_flags & BAND_VALID_MASK(0)))
		{
			return false;
		}
	}

	// All lowpass bands in all channels are valid
	return true;
}
#endif

// Derive the encoded frame size from the dimensions of the smallest (first)
// wavelet.  Both transform types expand by a factor of eight in each axis.
static bool ComputeFrameDimensionsFromFirstWavelet(int transform_type,
												   int first_wavelet_width,
												   int first_wavelet_height,
												   int *frame_width_out,
												   int *frame_height_out)
{
	int frame_width;
	int frame_height;
	int expansion = 8;

	switch (transform_type)
	{
	case TRANSFORM_TYPE_SPATIAL:
		frame_width = first_wavelet_width * expansion;
		frame_height = first_wavelet_height * expansion;
		break;

	case TRANSFORM_TYPE_FIELDPLUS:
		frame_width = first_wavelet_width * expansion;
		frame_height = first_wavelet_height * expansion;
		break;

	default:
		assert(0);
		return false;
	}

	// Return the frame dimensions
	*frame_width_out = frame_width;
	*frame_height_out = frame_height;

	return true;
}

// Decode the sample header to determine the type of sample and other parameters
// NOTE(review): header is dereferenced (videoChannels, find_lowpass_bands)
// before the NULL check that follows these declarations — confirm that no
// caller ever passes header == NULL
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
	TAGVALUE segment;
	int sample_type;
	int sample_size = 0;

	// Group index
	uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

	// Number of channels in the group index
	int channel_count;

	// Values used for computing the frame width and height (if necessary)
	int transform_type = -1;
	int first_wavelet_width = 0;
	int first_wavelet_height = 0;
	int display_height = 0;
	int current_channel = 0;
	int currentVideoChannel = header->videoChannels;
	int
find_lowpass_bands = header->find_lowpass_bands & 1; int find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0; int find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0; if (header == NULL) { return false; } if(currentVideoChannel == 0) currentVideoChannel = 1; // Clear the entire sample header to prevent early return from this routine memset(header, 0, sizeof(SAMPLE_HEADER)); // Clear the error code header->error = CODEC_ERROR_OKAY; // Initialize the frame dimensions to unknown header->width = 0; header->height = 0; header->videoChannels = 1; // Initialize the original pixel format to unknown header->input_format = COLOR_FORMAT_UNKNOWN; // Initialize the encoded format to unknown header->encoded_format = ENCODED_FORMAT_UNKNOWN; // Clear the frame number in case it is not present in the sample header->frame_number = 0; // The video is not progressive if the sample flags are not present header->hdr_progressive = false; #if _BITSTREAM_UNALIGNED // Record the alignment of the bitstream within the sample SetBitstreamAlignment(input, 0); #endif sample_size = input->nWordsUsed; // Get the type of sample (should be the first tag value pair) segment = GetTagValue(input); assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { header->error = CodecErrorBitstream(input); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames header->key_frame = true; header->difference_frame = false; header->droppable_frame = false; break; case SAMPLE_TYPE_FRAME: // The second or later frame in a group header->key_frame = false; header->difference_frame = true; header->droppable_frame = true; break; case SAMPLE_TYPE_IFRAME: // One frame in the group header->key_frame = true; header->difference_frame = false; header->droppable_frame = true; break; case SAMPLE_TYPE_SEQUENCE_HEADER: // Treat the video sequence header like a keyframe that can be dropped header->key_frame = 
true; header->difference_frame = false; header->droppable_frame = true; break; default: // Unknown type of sample header->error = CODEC_ERROR_SAMPLE_TYPE; return false; break; } // Continue parsing the sample header until all of the information has been found while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all (find_uncompressed == 1 && current_channel < 1) || display_height == 0 || header->width == 0 || header->height == 0 || header->input_format == COLOR_FORMAT_UNKNOWN || header->frame_number == 0 || (header->interlaced_flags == 0 && header->hdr_progressive == 0)) { int chunksize = 0; // Get the next tag value pair from the bitstream segment = GetSegment(input); // Did the bitstream end before the last tag was found? if (input->error == BITSTREAM_ERROR_UNDERFLOW) { break; } // Did an error occur while reading the bitstream? if (input->error != BITSTREAM_ERROR_OKAY) { header->error = CodecErrorBitstream(input); return false; } // Is this an optional tag? if (segment.tuple.tag < 0) { segment.tuple.tag = NEG(segment.tuple.tag); } if(segment.tuple.tag & 0x2000) { chunksize = segment.tuple.value; chunksize &= 0xffff; chunksize += ((segment.tuple.tag&0xff)<<16); } else if(segment.tuple.tag & 0x4000) { chunksize = segment.tuple.value; chunksize &= 0xffff; } // else if(tag == CODEC_TAG_INDEX) // handled below // { // chunksize = value; // chunksize &= 0xffff; // } else { chunksize = 0; } if((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000) { int skip = 1; if((segment.tuple.tag & 0xff00) == 0x2200) //sample size { if(sample_size < chunksize*4) find_header_info_only = 1; skip = find_header_info_only; if(currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only) { BITSTREAM input2; SAMPLE_HEADER header2; BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4); int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx int eye_sample_size = input->nWordsUsed - 
eye_offset; // Search for first sample of the next frame while((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0) { eye2 += 4; chunksize ++; eye_offset += 4; eye_sample_size -= 4; } // Save the offset to the right stereo sample header->left_sample_size = eye_offset; { InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ); memset(&header2, 0, sizeof(SAMPLE_HEADER)); header2.find_lowpass_bands = 1; currentVideoChannel++; header2.videoChannels = currentVideoChannel; if(ParseSampleHeader(&input2, &header2)) { int i; for(i=0;i<4;i++) { if(header2.thumbnail_channel_offsets[i]) header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i]; } } } } } if((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size { header->hdr_uncompressed = 1; skip = 1; if(find_lowpass_bands != 1) break; } if((segment.tuple.tag & 0xff00) == 0x2100) //level { if(find_lowpass_bands == 1) { skip = 0; } else { skip = 1; // no header data after the fix level break; } } if(chunksize) { if(skip) { input->lpCurrentWord += chunksize*4; input->nWordsUsed -= chunksize*4; } } else { switch (segment.tuple.tag) { case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP. 
header->encoder_version = (((segment.tuple.value>>12) & 0xf)<<16) | (((segment.tuple.value>>8) & 0xf)<<8) | ((segment.tuple.value) & 0xff); break; case CODEC_TAG_INDEX: // Get the number of channels in the index to skip channel_count = segment.tuple.value; DecodeGroupIndex(input, (uint32_t *)&channel_size[0], channel_count); break; case CODEC_TAG_FRAME_WIDTH: // Record the frame width in the sample header header->width = segment.tuple.value; break; case CODEC_TAG_FRAME_HEIGHT: // Record the frame height in the sample header header->height = segment.tuple.value; break; case CODEC_TAG_FRAME_DISPLAY_HEIGHT: display_height = segment.tuple.value; break; case CODEC_TAG_LOWPASS_WIDTH: // Save the width of the smallest wavelet for computing the frame dimensions first_wavelet_width = segment.tuple.value; break; case CODEC_TAG_LOWPASS_HEIGHT: // Save the height of the smallest wavelet for computing the frame dimensions first_wavelet_height = segment.tuple.value; break; case CODEC_TAG_TRANSFORM_TYPE: // Save the type of transform for computing the frame dimensions (if necessary) transform_type = segment.tuple.value; break; case CODEC_TAG_INPUT_FORMAT: // Record the original format of the encoded frames header->input_format = (COLOR_FORMAT)segment.tuple.value; break; case CODEC_TAG_ENCODED_FORMAT: case CODEC_TAG_OLD_ENCODED_FORMAT: // Record the encoded format (internal representation) header->encoded_format = (ENCODED_FORMAT)segment.tuple.value; if(header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3) header->encoded_format = ENCODED_FORMAT_RGB_444; break; case CODEC_TAG_FRAME_NUMBER: // Record the frame number for debugging header->frame_number = segment.tuple.value; break; case CODEC_TAG_INTERLACED_FLAGS: // Record the flags that indicate the field type header->interlaced_flags = segment.tuple.value; break; case CODEC_TAG_SAMPLE_FLAGS: // The sample flags specify progressive versus interlaced decoding header->hdr_progressive = !!(segment.tuple.value & 
SAMPLE_FLAGS_PROGRESSIVE); if (header->hdr_progressive) { // Clear the interlaced flags header->interlaced_flags = 0; } break; case CODEC_TAG_LOWPASS_SUBBAND: if(segment.tuple.value == 0) // low pass band { int count = 8; uint32_t *lptr = (uint32_t *)input->lpCurrentWord; do { uint32_t longword = SwapInt32(lptr[count]); unsigned short t,v; t = (longword>>16) & 0xffff; v = (longword) & 0xffff; if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4) { header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4; break; } count++; } while(count < 32); current_channel++; } break; case CODEC_TAG_ENCODED_CHANNELS: if(header->videoChannels == 1) { header->videoChannels = segment.tuple.value; if(header->videoChannels < 1) header->videoChannels = 1; } break; case CODEC_TAG_QUALITY_L: // header->encode_quality &= 0xffff0000; header->encode_quality |= segment.tuple.value; break; case CODEC_TAG_QUALITY_H: // header->encode_quality &= 0xffff; header->encode_quality |= segment.tuple.value<<16; break; } // Have the encoded frame dimensions been computed? if (header->width == 0 || header->height == 0) { // Found the first wavelet in the bitstream? if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0) { // The group header did not contain tags for the frame dimensions // prior to the release of support for RGB 4:4:4, so must attempt to // compute the frame dimensions from the dimensions of the lowpass band. 
int frame_width = 0; int frame_height = 0; // Use the dimensions of the first wavelet to compute the frame width and height if (!ComputeFrameDimensionsFromFirstWavelet(transform_type, first_wavelet_width, first_wavelet_height, &frame_width, &frame_height)) { // Could not compute the frame dimensions header->error = CODEC_ERROR_FRAME_DIMENSIONS; return false; } // Save the frame dimensions in the sample header header->width = frame_width; header->height = frame_height; // No more header information after finding the lowpass band break; } } if(find_lowpass_bands != 1 && find_uncompressed != 1) { // No more header information after the first encoded band if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER) { // Stop looking for header information break; } // No more header information after the frame index if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX) { // Stop looking for header information break; } // No more header information after the lowpass band header if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH) { // Stop looking for header information break; } } } } } if (header->width == 0 || header->height == 0) { assert(0); } // Fill in the encoded format if it was not present in the header if (header->encoded_format == ENCODED_FORMAT_UNKNOWN) { header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count); } if (display_height > 0) { header->height = display_height; } if (header->encoded_format == ENCODED_FORMAT_BAYER) { header->width *= 2; header->height *= 2; if(display_height == 0) { if(header->height == 1088) header->height = 1080; } } // Return true if the header was parsed completely and correctly return (header->width > 0 && header->height > 0 && ((sample_type == SAMPLE_TYPE_FRAME) || (header->input_format != COLOR_FORMAT_UNKNOWN && header->encoded_format != ENCODED_FORMAT_UNKNOWN))); // It is not an error if the frame number was not found in the sample header } bool DumpSampleHeader(BITSTREAM *input, FILE *logfile) { 
TAGVALUE segment; int lowpass_width = 0; int lowpass_height = 0; // Parse the sample header until the lowpass band is found while (lowpass_width == 0 && lowpass_height == 0) { // Get the next tag value pair from the bitstream segment = GetSegment(input); // Did an error occur while reading the bitstream? if (input->error != BITSTREAM_ERROR_OKAY) { return false; } // Is this an optional tag? if (segment.tuple.tag < 0) { segment.tuple.tag = NEG(segment.tuple.tag); } // Check that the tag is valid assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED); switch (segment.tuple.tag) { case CODEC_TAG_SAMPLE: fprintf(logfile, "Sample type: %d\n", segment.tuple.value); break; case CODEC_TAG_FRAME_WIDTH: fprintf(logfile, "Frame width: %d\n", segment.tuple.value); break; case CODEC_TAG_FRAME_HEIGHT: fprintf(logfile, "Frame height: %d\n", segment.tuple.value); break; case CODEC_TAG_LOWPASS_WIDTH: lowpass_width = segment.tuple.value; fprintf(logfile, "Lowpass width: %d\n", lowpass_width); break; case CODEC_TAG_LOWPASS_HEIGHT: lowpass_height = segment.tuple.value; fprintf(logfile, "Lowpass height: %d\n", lowpass_height); break; case CODEC_TAG_TRANSFORM_TYPE: fprintf(logfile, "Transform type: %d\n", segment.tuple.value); break; case CODEC_TAG_INPUT_FORMAT: fprintf(logfile, "Input format: %d\n", segment.tuple.value); break; case CODEC_TAG_ENCODED_FORMAT: case CODEC_TAG_OLD_ENCODED_FORMAT: fprintf(logfile, "Encoded format: %d\n", segment.tuple.value); break; case CODEC_TAG_FRAME_NUMBER: fprintf(logfile, "Frame number: %d\n", segment.tuple.value); break; } } return true; } int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work { TAGWORD tag,value=1; unsigned char *pos = NULL; int readsize = input->nWordsUsed; if(readsize > 4096) // only need to scan the first few tuplets { readsize = 4096; } else { //Tiny therefore P-frame, nothing to be read so: value=decoder->real_channels; // return the last value. 
return value; } pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value); if(pos && value>1 && skip_to_channel>1) { int chunksize = 0; intptr_t offset; int count = 0; do { tag = *pos++<<8; tag |= *pos++; value = *pos++<<8; value |= *pos++; if (tag < 0) { tag = NEG(tag); } } while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10); if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE) { chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4; input->lpCurrentWord += offset; input->nWordsUsed -= (int)offset; { uint8_t *tag = (uint8_t *)input->lpCurrentWord; // Search for first sample of the next frame while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0) { input->lpCurrentWord += 4; input->nWordsUsed -= 4; tag += 4; } } } } //if(value == 0) value = 1; // old non-stereo file return value; } #define SUBPIXEL 64 static short gains[SUBPIXEL+1][4] = { {0*128,0*128,0x7fff,0*128}, {0*128,2*128,0x7fff,-2*128}, {0*128,5*128,255*128,-4*128}, {0*128,8*128,254*128,-6*128}, {0*128,11*128,253*128,-8*128}, {0*128,14*128,252*128,-10*128}, {0*128,18*128,250*128,-12*128}, {0*128,21*128,248*128,-13*128}, {-1*128,25*128,247*128,-15*128}, {-1*128,29*128,244*128,-16*128}, {-1*128,33*128,241*128,-17*128}, {-2*128,37*128,239*128,-18*128}, {-2*128,41*128,236*128,-19*128}, {-3*128,46*128,233*128,-20*128}, {-3*128,50*128,229*128,-20*128}, {-4*128,55*128,226*128,-21*128}, {-4*128,60*128,221*128,-21*128}, {-5*128,65*128,217*128,-21*128}, {-5*128,70*128,213*128,-22*128}, {-6*128,75*128,209*128,-22*128}, {-7*128,80*128,205*128,-22*128}, {-7*128,85*128,199*128,-21*128}, {-8*128,91*128,194*128,-21*128}, {-9*128,96*128,190*128,-21*128}, {-10*128,102*128,185*128,-21*128}, {-10*128,107*128,179*128,-20*128}, {-11*128,113*128,174*128,-20*128}, {-12*128,118*128,169*128,-19*128}, {-13*128,124*128,164*128,-19*128}, 
{-14*128,129*128,159*128,-18*128}, {-14*128,135*128,152*128,-17*128}, {-15*128,141*128,147*128,-17*128}, {-16*128,144*128,144*128,-16*128}, {-17*128,147*128,141*128,-15*128}, {-17*128,152*128,135*128,-14*128}, {-18*128,159*128,129*128,-14*128}, {-19*128,164*128,124*128,-13*128}, {-19*128,169*128,118*128,-12*128}, {-20*128,174*128,113*128,-11*128}, {-20*128,179*128,107*128,-10*128}, {-21*128,185*128,102*128,-10*128}, {-21*128,190*128,96*128,-9*128}, {-21*128,194*128,91*128,-8*128}, {-21*128,199*128,85*128,-7*128}, {-22*128,205*128,80*128,-7*128}, {-22*128,209*128,75*128,-6*128}, {-22*128,213*128,70*128,-5*128}, {-21*128,217*128,65*128,-5*128}, {-21*128,221*128,60*128,-4*128}, {-21*128,226*128,55*128,-4*128}, {-20*128,229*128,50*128,-3*128}, {-20*128,233*128,46*128,-3*128}, {-19*128,236*128,41*128,-2*128}, {-18*128,239*128,37*128,-2*128}, {-17*128,241*128,33*128,-1*128}, {-16*128,244*128,29*128,-1*128}, {-15*128,247*128,25*128,-1*128}, {-13*128,248*128,21*128,0*128}, {-12*128,250*128,18*128,0*128}, {-10*128,252*128,14*128,0*128}, {-8*128,253*128,11*128,0*128}, {-6*128,254*128,8*128,0*128}, {-4*128,255*128,5*128,0*128}, {-2*128,0x7fff,2*128,0*128}, {0*128,0*128,0x7fff,0*128} }; static int lanczos[256] = { 0, -2, -8, -18, -33, -53, -77, -106, -141, -179, -223, -272, -325, -384, -447, -514, -586, -662, -742, -826, -913, -1004, -1097, -1193, -1290, -1389, -1490, -1591, -1692, -1792, -1892, -1990, -2086, -2179, -2269, -2355, -2436, -2511, -2580, -2643, -2697, -2744, -2781, -2809, -2826, -2832, -2826, -2808, -2776, -2730, -2670, -2594, -2503, -2395, -2271, -2129, -1969, -1790, -1593, -1377, -1141, -886, -611, -315, 0, 336, 692, 1069, 1466, 1884, 2321, 2778, 3255, 3750, 4265, 4797, 5347, 5914, 6498, 7097, 7711, 8340, 8982, 9636, 10301, 10977, 11663, 12357, 13058, 13765, 14477, 15192, 15910, 16630, 17349, 18066, 18781, 18871, 19580, 20285, 20986, 21678, 22361, 23035, 23697, 24348, 24983, 25604, 26206, 26790, 27354, 27898, 28419, 28915, 29387, 29832, 30249, 30638, 30997, 
// Tail of the lanczos[] kernel table: the peak and the falling half.
31326, 31623, 31886, 32117, 32314, 32476, 32603, 32695, 32749,
32767, //was 32768, issue for SSE2 (32768 does not fit in a signed 16-bit lane)
32749, 32695, 32603, 32476, 32314, 32117, 31886, 31623, 31326, 30997, 30638, 30249, 29832, 29387, 28915, 28419,
27898, 27354, 26790, 26206, 25604, 24983, 24348, 23697, 23035, 22361, 21678, 20986, 20285, 19580, 18871, 18159,
18066, 17349, 16630, 15910, 15192, 14477, 13765, 13058, 12357, 11663, 10977, 10301, 9636, 8982, 8340, 7711,
7097, 6498, 5914, 5347, 4797, 4265, 3750, 3255, 2778, 2321, 1884, 1466, 1069, 692, 336, 0,
-315, -611, -886, -1141, -1377, -1593, -1790, -1969, -2129, -2271, -2395, -2503, -2594, -2670, -2730, -2776,
-2808, -2826, -2832, -2826, -2809, -2781, -2744, -2697, -2643, -2580, -2511, -2436, -2355, -2269, -2179, -2086,
-1990, -1892, -1792, -1692, -1591, -1490, -1389, -1290, -1193, -1097, -1004, -913, -826, -742, -662, -586,
-514, -447, -384, -325, -272, -223, -179, -141, -106, -77, -53, -33, -18, -8, -2,
};

// Vertically shift and zoom an image in place.  Every output row is
// resampled from four source rows with the 4-tap lanczos[] kernel
// (64 subpixel phases).  RGB48 is both source and destination; `buffer` is
// caller-supplied scratch that receives a copy of the band of source rows
// the filter can touch, so the in-place writes never read already-written
// rows.  widthbytes is the row payload in bytes, pitch the row stride in
// bytes (may be negative for bottom-up images), offset the vertical shift
// as a fraction of the height, zoom the scale factor (1.0 = none).
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
	int widthbytes, int height, int pitch, float offset, float zoom)
{
	float yposf,ystepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline[4];
	//int spitch = pitch/2;
	int neg = 0,step;	// NOTE(review): `neg` is written below but never read in this routine
	__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
	__m128i *lineA, *lineB, *lineC, *lineD, *outline128;

	offset = -offset;
	yposf = height * offset;	// NOTE(review): dead store - overwritten by the next line
	// Source row (float) that maps to output row 0, centring the zoom window.
	yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
	ystepf = 1.0f/zoom;	// source rows advanced per output row
	if(yposf < 0.0) neg = 1;
	if(pitch < 0) yposf -= ystepf;

	/* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */
	// -3 , 0 best small notch at zero?
	//
	// Bytes consumed per SSE iteration: 8-bit-per-component formats process
	// 16 bytes, 16-bit-per-component formats process 32.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_YUYV:
		step = 16;
		break;
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	default:
		step = 32;
		break;
	}
	{
		static char zeroline[1024] = {0};	// all-zero row substituted past the image edges
		int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
		unsigned char *src = (unsigned char *)RGB48;
		unsigned char *dst = (unsigned char *)RGB48;
		unsigned char *ptr = (unsigned char *)buffer;

		if(yoffset < 0) yoffset = 0;
		if(yend > height) yend = height;

		// Copy the rows the filter can reference into the scratch buffer
		// (tightly packed, widthbytes apart), since RGB48 is overwritten.
		src += pitch * yoffset;
		for(y=yoffset; y<yend; y++)
		{
			memcpy(ptr, src, widthbytes);
			ptr += widthbytes;
			src += pitch;
		}

		ptr = (unsigned char *)buffer;
		for(y=0;y<height; y++)
		{
			int i,t,yp = ((int)yposf);
			int rmdr = 63-((int)(yposf*64.0) & 63);	// subpixel phase, 0..63
			int gains[4];

			yp -= 1; // use -2 cause a image down shift //DAN20100225
			t = 0;
			// Select the four source rows and their kernel weights; rows
			// outside the image use the zero line.  t sums the weights so a
			// degenerate all-zero tap set can be skipped below.
			for(i=0; i<4; i++)
			{
				if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)zeroline;
				}
				else
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
				}
				yp++;
				rmdr+=64;
			}

			if(t)
			{
				__m128i half;
				gA = _mm_set1_epi16(gains[0]);
				gB = _mm_set1_epi16(gains[1]);
				gC = _mm_set1_epi16(gains[2]);
				gD = _mm_set1_epi16(gains[3]);

				outline128 = (__m128i *)dst;
				lineA = (__m128i *)scanline[0];
				lineB = (__m128i *)scanline[1];
				lineC = (__m128i *)scanline[2];
				lineD = (__m128i *)scanline[3];

				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					// 13-bit signed data: weight each row (mulhi = (a*g)>>16),
					// saturating-add the taps, clamp to 0..0x3fff with the
					// adds/subs trick, then <<1 restores full scale.
					// Loop body is unrolled x2 (step is 32 bytes).
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						// upper limit to 32767
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_slli_epi16(o128,1);
						_mm_storeu_si128(outline128++, o128);

						// second 16 bytes of this 32-byte step
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						// upper limit to 32767
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_slli_epi16(o128,1);
						_mm_storeu_si128(outline128++, o128);
					}
					break;

				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					// 16-bit unsigned data: >>3 to 13-bit so the signed
					// multiply is safe, clamp to 0..0x0fff, then <<4 back
					// to 16-bit.  Unrolled x2 like the case above.
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);
						_mm_storeu_si128(outline128++, o128);

						lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);
						_mm_storeu_si128(outline128++, o128);
					}
					break;

				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					// 8-bit data: widen the high then low 8 bytes to 16-bit
					// (byte in the high half of each lane), filter both
					// halves like the 16-bit path, then pack back to bytes.
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA); lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
						lB = _mm_loadu_si128(lineB); lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
						lC = _mm_loadu_si128(lineC); lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
						lD = _mm_loadu_si128(lineD); lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);
						half = o128;	// keep the high half until both are filtered

						lA = _mm_loadu_si128(lineA++); lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
						lB = _mm_loadu_si128(lineB++); lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
						lC = _mm_loadu_si128(lineC++); lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
						lD = _mm_loadu_si128(lineD++); lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);

						// drop back to 8 bits per component and repack
						half = _mm_srli_epi16(half,8);
						o128 = _mm_srli_epi16(o128,8);
						o128 = _mm_packus_epi16(o128, half);
						_mm_storeu_si128(outline128++, o128);
					}
					break;
				}
			}
			else
			{
				// All four taps were zero: emit a black row.
				if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
				{
					// BUG(review): memset converts its fill value to unsigned
					// char, so this writes 0x80 to every byte - not the
					// 0x10,0x80 luma/chroma pattern the constant suggests.
					memset(dst, 0x10801080, widthbytes);
				}
				else
				{
					memset(dst, 0, widthbytes);
				}
			}
			yposf += ystepf;
			dst += pitch;
		}

		/*ptr = (unsigned char *)buffer; for(y=0;y<height; y++) { int r,g,b,yp = ((int)yposf); yposf += ystepf; if(yp<0 || yp>= height) { memset(dst, 0, widthbytes); } else { memcpy(dst, &ptr[widthbytes*yp], widthbytes); } dst += pitch; }*/
	}
}

// Variant of RGB48VerticalShiftZoom that resamples a narrow strip (one pixel
// group per row) instead of whole rows - the scalar/extract stores below
// write at most 4 components per row.  Same kernel and scratch-buffer scheme
// as above.  NOTE(review): the `xx` parameter is never referenced in this
// routine - presumably the caller pre-offsets RGB48/buffer to column xx;
// confirm against the call sites.
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
	int widthbytes, int height, int pitch, float offset, float zoom, int xx)
{
	float yposf,ystepf;
	//int endofSSEline = 0;
	unsigned short *scanline[4];
	//int spitch = pitch/2;
	int neg = 0,step;	// NOTE(review): `neg` and `step` are written but never read here
	__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
	uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
	uint8_t *outlinePos8;
	uint16_t *outlinePos16;

	offset = -offset;
	//yposf = height * offset;
	yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
	ystepf = 1.0f/zoom;
	if(yposf < 0.0) neg = 1;
	if(pitch < 0) yposf -= ystepf;

	/* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */
	// -3 , 0 best small notch at zero?
	//
	// Bytes per pixel group for each buffer format (set but unused below;
	// the cases advance their pointers by hard-coded amounts instead).
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32: step = 4; break;
	case DECODED_FORMAT_RGB24: step = 3; break;
	case DECODED_FORMAT_YUYV: step = 4; break;
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64: step = 8; break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48: step = 6; break;
	default: assert(0); break;
	}
	{
		static char zeroline[1024] = {0};	// all-zero row used past the image edges
		int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
		unsigned char *src = (unsigned char *)RGB48;
		unsigned char *dst = (unsigned char *)RGB48;
		unsigned char *ptr = (unsigned char *)buffer;

		if(yoffset < 0) yoffset = 0;
		if(yend > height) yend = height;

		// Copy the reachable source band into the scratch buffer (packed).
		src += pitch * yoffset;
		for(y=yoffset; y<yend; y++)
		{
			memcpy(ptr, src, widthbytes);
			ptr += widthbytes;
			src += pitch;
		}

		ptr = (unsigned char *)buffer;
		for(y=0;y<height; y++)
		{
			int i,t,yp = ((int)yposf);
			int rmdr = 63-((int)(yposf*64.0) & 63);	// subpixel phase, 0..63
			int gains[4];

			yp -= 1; // use -2 cause a image down shift //DAN20100225
			t = 0;
			// Choose the four source rows and kernel weights (zero line
			// outside the image); t detects an all-zero tap set.
			for(i=0; i<4; i++)
			{
				if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)zeroline;
				}
				else
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
				}
				yp++;
				rmdr+=64;
			}

			if(t)
			{
				gA = _mm_set1_epi16(gains[0]);
				gB = _mm_set1_epi16(gains[1]);
				gC = _mm_set1_epi16(gains[2]);
				gD = _mm_set1_epi16(gains[3]);

				outlinePos8 = (uint8_t *)dst;
				outlinePos16 = (uint16_t *)dst;
				lineAPos = (uint8_t *)scanline[0];
				lineBPos = (uint8_t *)scanline[1];
				lineCPos = (uint8_t *)scanline[2];
				lineDPos = (uint8_t *)scanline[3];

				// One pixel group per row.  NOTE(review): every SSE case
				// loads a full 16 bytes per row even though only 4-8 are
				// used - a potential overread at the end of the scratch
				// buffer; confirm the buffer has slack.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
					// 13-bit signed, 4 components: filter, clamp to 0x3fff, <<1.
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_WP13:
					// 13-bit signed, 3 components.
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RG64:
					// 16-bit unsigned, 4 components: >>3, filter, clamp 0x0fff, <<4.
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_RG48:
					// 16-bit unsigned, 3 components.
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_YUYV:
					// 8-bit, 4 components: widen, >>3, filter, clamp, >>4
					// back to 8-bit range, store per byte.
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_srli_epi16(o128,4);
					outlinePos8[0] = _mm_extract_epi16(o128, 0);
					outlinePos8[1] = _mm_extract_epi16(o128, 1);
					outlinePos8[2] = _mm_extract_epi16(o128, 2);
					outlinePos8[3] = _mm_extract_epi16(o128, 3);
					outlinePos8+=4;
					break;

				case DECODED_FORMAT_RGB24:
					// 3-byte pixels: scalar path (see the disabled SSE2
					// attempt below).  gains[] are /128 here, hence >>7 then
					// >>8 to get back to 8 bits.
					{
						int r,g,b;
						b = ((lineAPos[0] * gains[0])>>7) + ((lineBPos[0] * gains[1])>>7) + ((lineCPos[0] * gains[2])>>7) + ((lineDPos[0] * gains[3])>>7); //16-bit
						g = ((lineAPos[1] * gains[0])>>7) + ((lineBPos[1] * gains[1])>>7) + ((lineCPos[1] * gains[2])>>7) + ((lineDPos[1] * gains[3])>>7); //16-bit
						r = ((lineAPos[2] * gains[0])>>7) + ((lineBPos[2] * gains[1])>>7) + ((lineCPos[2] * gains[2])>>7) + ((lineDPos[2] * gains[3])>>7); //16-bit
						if(r<0) r = 0; if(r>65535) r = 65535;
						if(g<0) g = 0; if(g>65535) g = 65535;
						if(b<0) b = 0; if(b>65535) b = 65535;
						lineAPos+=3;
						lineBPos+=3;
						lineCPos+=3;
						lineDPos+=3;
						outlinePos8[0] = b >> 8; //b
						outlinePos8[1] = g >> 8; //g
						outlinePos8[2] = r >> 8; //r
						outlinePos8+=3;
						/* SSE2 can't load byte alligned lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3; lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3; lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3; lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3; lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_srli_epi16(o128,4); outlinePos8[0] = _mm_extract_epi16(o128, 0); //b outlinePos8[1] = _mm_extract_epi16(o128, 1); //g outlinePos8[2] = _mm_extract_epi16(o128, 2); //r outlinePos8+=3; */
					}
					break;
				}
			}
			else
			{
				// All four taps zero: emit black.
				if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
				{
					// BUG(review): memset truncates the fill value to one
					// byte (0x80), not the 0x10,0x80 YUYV pattern implied.
					memset(dst, 0x10801080, widthbytes);
				}
				else
				{
					memset(dst, 0, widthbytes);
				}
			}
			yposf += ystepf;
			dst += pitch;
		}
	}
}

// Vertically shift an image in place by a fixed amount (no zoom): offset is a
// signed fraction of the height, split into an integer row shift plus a
// subpixel phase resolved with the 4-tap gains[SUBPIXEL][4] table.  `buffer`
// holds a rolling window of four source scanlines (rotated as the loop walks
// the rows); negative offsets are handled by walking rows bottom-up.
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
	int widthbytes, int height, int pitch, float offset)
{
	float yposf,remainf;
	int yposi,tablepos,x,y;
	int gainA,gainB,gainC,gainD;
	//int endofSSEline = 0;
	unsigned short *scanline[4], *tline;
	int spitch = pitch/2;	// pitch in 16-bit units
	int neg = 0,shift = 0,skip,step;
	int origwidthbytes = widthbytes;	// real payload; widthbytes is rounded up below
	int origwidthextra;
	__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
	__m128i *lineA, *lineB, *lineC, *lineD, *outline128;

	// offset = -offset;
	if(offset < 0.0) neg = 1;

	// Split the shift into whole rows (yposi) + subpixel table index.
	yposf = height * offset;
	yposi = (int)floor(yposf);
	remainf = yposf - (float)yposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	yposi = abs(yposi);
	if(yposi==0 && tablepos == 0) return; // no move required

	// -3 , 0 best small notch at zero?
	//
	// For a negative shift the rows are walked bottom-up, so the tap weights
	// are applied in reverse order (gainD..gainA swapped).
	if(neg)
	{
		yposi -= 2;
		gainA = gains[tablepos][0];
		gainB = gains[tablepos][1];
		gainC = gains[tablepos][2];
		gainD = gains[tablepos][3];
	}
	else
	{
		yposi -= 1; //offset inherent in the table
		gainD = gains[tablepos][0];
		gainC = gains[tablepos][1];
		gainB = gains[tablepos][2];
		gainA = gains[tablepos][3];
	}
	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	// skip = bytes per pixel, step = bytes per SSE iteration.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32: skip = 4; step = 16; break;
	case DECODED_FORMAT_RGB24: skip = 3; step = 16; break;
	case DECODED_FORMAT_YUYV: skip = 2; step = 16; break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	default: skip = 6; step = 32; break;
	}

	// scanline[0] = buffer;
	// scanline[1] = buffer + width*skip/2;
	// scanline[2] = buffer + width*skip/2*2;
	// scanline[3] = buffer + width*skip/2*3;

	// Round the working width up to a whole number of SSE steps; the real
	// payload tail (origwidthextra bytes) is handled with memcpy staging.
	widthbytes += (step - 1);
	widthbytes -= (widthbytes % step);
	origwidthextra = (origwidthbytes % step);

	scanline[0] = buffer;
	scanline[1] = buffer + widthbytes/2;
	scanline[2] = buffer + widthbytes/2*2;
	scanline[3] = buffer + widthbytes/2*3;

	// Prime the 4-row window; rows outside the image are zero-filled.
	for(y=0; y<4; y++)
	{
		if(yposi+y >=0 && yposi+y<height)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-1-yposi-y)*spitch;
			else
				ptr += (yposi+y)*spitch;
			memcpy(scanline[y], ptr, origwidthbytes);
		}
		else
		{
			memset(scanline[y], 0, origwidthbytes);
		}
	}

	{
		for(y=0;y<height; y++)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-y-1)*spitch;
			else
				ptr += y*spitch;

			outline128 = (__m128i *)ptr;
			lineA = (__m128i *)scanline[0];
			lineB = (__m128i *)scanline[1];
			lineC = (__m128i *)scanline[2];
			lineD = (__m128i *)scanline[3];

			//for(x=0;x<width*skip/2; x+=step)
			for(x=0;x<widthbytes; x+=step)
			{
				__m128i half;

				// First 16 bytes: load/widen per format.  16-bit formats load
				// directly (shift flag selects the clamp/scale used below);
				// 8-bit formats widen the high 8 bytes first.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA); lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB); lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC); lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD); lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				// Weighted 4-tap sum with saturation.
				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
				if(shift)
				{
					// 13-bit path: clamp 0..0x0fff then restore <<4.
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}
				if(skip == 6) //RGB48 || WP13
				{
					// 16-bit formats store each 16-byte half directly; the
					// final partial step is staged through scanline[0] (about
					// to be recycled) and memcpy'd to avoid overrunning the
					// destination row.
					if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
						_mm_storeu_si128(outline128++, o128);
					else
					{
						//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
						outline128++;
					}
				}
				else
				{
					half = o128;	// 8-bit formats: keep until the low half is done
				}

				// Second half of the step: same as above but the 8-bit
				// formats widen the LOW 8 bytes and advance the pointers.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA++); lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB++); lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC++); lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD++); lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}
				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1);
				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}
				if(skip != 6) //!RGB48 || !WP13
				{
					// 8-bit formats: repack the two filtered halves to bytes.
					half = _mm_srli_epi16(half,8);
					o128 = _mm_srli_epi16(o128,8);
					o128 = _mm_packus_epi16(o128, half);
				}
				if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
				{
					_mm_storeu_si128(outline128++, o128);
				}
				else
				{
					//if(x+16 < origwidthbytes+16)
					if(origwidthextra > 16)
					{
						// Partial final store: stage in scanline[0] and copy
						// only the remaining payload bytes.
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
					}
					outline128++;
				}
			}

			// Rotate the 4-row window and fetch the next source row into the
			// vacated slot (zero-filled outside the image).
			tline = scanline[0];
			scanline[0] = scanline[1];
			scanline[1] = scanline[2];
			scanline[2] = scanline[3];
			scanline[3] = tline;
			if(yposi+y+4 >=0 && yposi+y+4<height)
			{
				unsigned short *ptr = RGB48;
				if(neg)
					ptr += (height-1-(yposi+y+4))*spitch;
				else
					ptr += (yposi+y+4)*spitch;
				memcpy(scanline[3], ptr, origwidthbytes);
			}
			else
			{
				memset(scanline[3], 0, origwidthbytes);
			}
		}
	}
}

// Horizontally shift and zoom ONE scanline of RGB48 (3 shorts per pixel) in
// place, with optional mirror flip, a per-line rotation term (roffset scaled
// by `line`), a frame-tilt zoom adjustment per eye, and an optional
// non-uniform "FrameHDynamic" stretch that keeps a central hold zone at a
// flatter step.  `buffer` receives a copy of the source line; resampling uses
// the 4-tap lanczos[] kernel with an MMX fast path and a scalar edge path.
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
	int width, int height, int line, float hoffset, float roffset, float zoom,
	int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;	// signed view for the WP13 path
	int neg = 0;	// NOTE(review): written below but never read in this routine
	float offset = hoffset;

	// Mirror the line in place (swap R,G,B triplets end-for-end).
	if(flip)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			ptrR -= 6;
		}
	}

	// Frame tilt widens one eye and narrows the other.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	// First source position for output pixel 0: centred zoom window, shifted
	// by `offset`, plus the rotation term that varies with the line number.
	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f / zoom;
	xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
	if(xposf < 0.0) neg = 1;
	xstepf = 1.0f/zoom;

	// Snapshot the source line so the in-place writes never read dirty data.
	memcpy(scanline, RGB48, width*3*2);
	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;	// output index in shorts (3 per pixel)
		int ixpos = (int)(xposf * 65536.0f);	// 16.16 fixed-point position
		int ixstep = (int)(xstepf * 65536.0f);
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		// z != 0 enables the non-uniform stretch.
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		// int holdstart = width*5/10; // Use to specify a area of uniform stretch
		// int holdend = width*5/10;
		int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		float flatxstep;
		float modified_xstep_avg;
		float bottomxstep;
		float basexstepstart;
		float basexstepend;
		float range;
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif

		// Clamp the hold zone inside the image, preserving its width.
		if(holdstart < 0)
			holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
		if(holdend > width)
			holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);

		// Derive the step used inside the hold zone (flatxstep) and the
		// steeper ramp steps outside it so the total advance still equals
		// xstep*width across the whole line.
		range = (float)(holdend - holdstart);
		flatxstep = xstep-z*0.5f*xstep;
		modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
		bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
		if(holdstart == (width-holdend))
		{
			basexstepstart = bottomxstep;
			basexstepend = bottomxstep;
		}
		else if(holdstart < (width-holdend))
		{
			float a = (float)holdstart / (float)(width-holdend);
			float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}
		else
		{
			float a = (float)(width-holdend) / (float)holdstart;
			float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}

		if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
		{
			// Signed 13-bit data: mulhi path, final <<1.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;	// NOTE(review): shadows the file-scope gains[][] table
				int xp, rmdr;
				if(z != 0.0)
				{
					// Non-uniform stretch: ramp the step up to flatxstep
					// across the lead-in, hold it flat, ramp out after.
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);	// subpixel phase, 0..63
					xp >>= 16;
				}
				else
				{
					// Uniform zoom: pure 16.16 fixed-point walk.
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
				{
					// MMX fast path: 4 taps x 3 components in 64-bit regs;
					// each 8-byte store covers the pixel plus one extra short
					// that the next pixel overwrites.
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*3;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					src64 = (__m64 *)&sscanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar edge path: weights of taps that fall outside the
					// line accumulate in `gains` and are folded into the
					// first in-range tap.
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*3]);
							g += (gains * sscanline[xp*3+1]);
							b += (gains * sscanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
		else
		{
			// Unsigned 16-bit data: same walk, but samples are >>1 before
			// the 15x15-bit multiply and the result is <<2 at the end.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;	// NOTE(review): shadows the file-scope gains[][] table
				int xp, rmdr;
				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					src64 = (__m64 *)&scanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*3]); g += (gains * scanline[xp*3+1]); b += (gains * scanline[xp*3+2]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } } #if MMXSUPPORTED //TODO DANREMOVE //_mm_empty(); #endif } #if 0 //Why is this not used? void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye) { float xposf,remainf,xstepf; int xposi,tablepos,x; int Ra,Rb,Rc,Rd; int Ga,Gb,Gc,Gd; int Ba,Bb,Bc,Bd; int gainA,gainB,gainC,gainD; int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; short *sscanline = (short *)buffer; int neg = 0,shift = 0; float offset = hoffset; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; ptrR -= 6; } } if(eye > 0) { zoom *= 1.0 + frameTilt; } else { zoom /= 1.0 + frameTilt; } xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset); xposf -= width * roffset * 0.5 / zoom; xposf += (float)line * ((float)width* roffset / ((float)height*zoom)); if(xposf < 0.0) neg = 1; xstepf = 1.0/zoom; memcpy(scanline, RGB48, width*3*2); { unsigned short zeroline[3] = {0}; int xx = 0; int ixpos = xposf * 65536.0; int ixstep = xstepf * 65536.0; float xbase = xposf / (float)width; float xstep = xstepf / (float)width; float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0; int holdstart = width*5/10; // Use to specify a area of uniform stretch int holdend = width*5/10; float flatxstep = 
xstep-z*0.5*xstep; float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*3; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); 
src64 = (__m64 *)&sscanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * sscanline[(xp-1)*3]); g += (gains * sscanline[(xp-1)*3+1]); b += (gains * sscanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*3]); g += (gains * sscanline[xp*3+1]); b += (gains * sscanline[xp*3+2]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; 
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error. src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * scanline[(xp-1)*3]); g += (gains * scanline[(xp-1)*3+1]); b += (gains * scanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*3]); g += (gains * scanline[xp*3+1]); b += (gains * scanline[xp*3+2]); gains = 0; 
} xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } } /* memcpy(scanline, RGB48, width*3*2); { for(x=0;x<width*3; x+=3) //RGB { int r,g,b,xp = ((int)xposf)*3; xposf += xstepf; if(xp<0 || xp>= width*3) { RGB48[x] = 0; RGB48[x+1] = 0; RGB48[x+2] = 0; } else { r = scanline[xp]; g = scanline[xp+1]; b = scanline[xp+2]; RGB48[x] = r; RGB48[x+1] = g; RGB48[x+2] = b; } } } */ //_mm_empty(); } #endif void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye) { float xposf,xstepf; int x; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; short *sscanline = (short *)buffer; int neg = 0; float offset = hoffset; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; ptrR -= 4; } } if(eye > 0) { zoom *= 1.0f + frameTilt; } else { zoom /= 1.0f + frameTilt; } xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset); xposf -= width * roffset * 0.5f; xposf += line * (width* roffset / ((float)height*zoom)); if(xposf < 0.0) neg = 1; xstepf = 1.0f/zoom; memcpy(scanline, RGB48, width*4*2); { //unsigned short zeroline[3] = {0}; int xx = 0; int ixpos = (int)(xposf * 65536.0f); int ixstep = (int)(xstepf * 65536.0f); float xbase = xposf / (float)width; float xstep = xstepf / (float)width; float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f; int holdstart = width*5/10; // Use to specify a area of uniform stretch int holdend = width*5/10; float flatxstep = xstep-z*0.5f*xstep; float modified_xstep_avg = (xstep * 
(float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); #if MMXSUPPORTED //TODO DANREMOVE __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); #endif if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*4; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = 
_mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*4]); g += (gains * sscanline[xp*4+1]); b += (gains * sscanline[xp*4+2]); a += (gains * sscanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error. 
src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*4]); g += (gains * scanline[xp*4+1]); b += (gains * scanline[xp*4+2]); a += (gains * scanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } } #if MMXSUPPORTED //TODO DANREMOVE //_mm_empty(); #endif } void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask) { float line = (float)width * fabsf(windowMask); int pixelbytes = 6; float frac = (float)(line-(float)((int)line)); 
// 4-component decoded formats use 8 bytes per pixel instead of the default 6.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
	pixelbytes = 8;
	break;
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
{
	short *ptrL = (short *)RGB48;
	short *ptrR = (short *)RGB48;
	// Negative windowMask masks the opposite edge: flip which channel is blanked.
	if(windowMask < 0) channel = channel == 0 ? 1 : 0;
	if(pixelbytes == 6)
	{
		if(channel == 0)
		{
			// Blank 'line' pixels at the left edge, then feather the first
			// surviving pixel by the fractional part for a soft mask edge.
			memset(ptrL, 0, 6*(int)line);
			ptrL += ((int)line*3);
			ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
			ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
			ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
		}
		else
		{
			// Blank 'line' pixels at the right edge; feather the pixel just
			// inside the mask (negative indices step back from the mask start).
			ptrR += ((width-(int)line)*3);
			memset(ptrR, 0, 6*(int)line);
			ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
			ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
			ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
		}
	}
	else
	{
		// Same as above for 4-component pixels (includes the alpha channel).
		if(channel == 0)
		{
			memset(ptrL, 0, 8*(int)line);
			ptrL += ((int)line*4);
			ptrL[0] = (int)((float)ptrL[0] * (1.0-frac));
			ptrL[1] = (int)((float)ptrL[1] * (1.0-frac));
			ptrL[2] = (int)((float)ptrL[2] * (1.0-frac));
			ptrL[3] = (int)((float)ptrL[3] * (1.0-frac));
		}
		else
		{
			ptrR += ((width-(int)line)*4);
			memset(ptrR, 0, 8*(int)line);
			ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac));
			ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac));
			ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac));
			ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac));
		}
	}
}
else
{
	// Unsigned 16-bit path: identical masking/feathering logic.
	unsigned short *ptrL = RGB48;
	unsigned short *ptrR = RGB48;
	if(windowMask < 0) channel = channel == 0 ?
1 : 0; if(pixelbytes == 6) { if(channel == 0) { memset(ptrL, 0, 6*(int)line); ptrL += ((int)line*3); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); } else { ptrR += ((width-(int)line)*3); memset(ptrR, 0, 6*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); } } else { if(channel == 0) { memset(ptrL, 0, 8*(int)line); ptrL += ((int)line*4); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); ptrL[3] = (int)((float)ptrL[3] * (1.0-frac)); } else { ptrR += ((width-(int)line)*4); memset(ptrR, 0, 8*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac)); } } } } void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t1,t2,t3; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; ptrL += 3; ptrR -= 3; } } if(offset < 0.0) neg = 1; xposf = width * offset; xposi = (int)floorf(xposf); remainf = xposf - (float)xposi; tablepos = (int)(remainf*(float)SUBPIXEL); xposi = abs(xposi); if(xposi==0 && tablepos == 0) return; // no move required gainA = gains[tablepos][0]; gainB = gains[tablepos][1]; gainC = gains[tablepos][2]; gainD = gains[tablepos][3]; 
if(neg == 0) { unsigned short *ptr = scanline; int nwidth = width-xposi+16; if(nwidth > width) nwidth = width; for(x=0;x<xposi+2;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } memcpy(ptr, RGB48, (nwidth)*3*2); ptr += (nwidth)*3; for(x=0;x<16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+xposi-2>=0) { *ptr++ = RGB48[(x+xposi-2)*3];//r *ptr++ = RGB48[(x+xposi-2)*3+1];//g *ptr++ = RGB48[(x+xposi-2)*3+2];//b } else { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); ptr += (width-xposi)*3; for(x=0;x<xposi+16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } gA = _mm_set1_epi16(gainA); gB = _mm_set1_epi16(gainB); gC = _mm_set1_epi16(gainC); gD = _mm_set1_epi16(gainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0 //t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0 //t2 >>= 5*16; //t2 = 0 0 0 0 0 
b4,r5,g5 //t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5 //l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,3*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0 //t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0 //t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0 //t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,6*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); t2 = _mm_slli_si128(l3,7*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t1,t2,t3,t4; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; t4 = ptrL[2]; ptrL[3] = ptrR[3]; ptrR[3] = t4; 
// Advance to the next pixel pair (indexed swaps above, so a plain +-4 suffices).
ptrL += 4;
ptrR -= 4;
}
}
if(offset < 0.0) neg = 1;
// Split the pixel offset into integer position and sub-pixel table index.
xposf = width * offset;
xposi = (int)floorf(xposf);
remainf = xposf - (float)xposi;
tablepos = (int)(remainf*(float)SUBPIXEL); // SUBPIXEL/gains[] declared elsewhere in this file
xposi = abs(xposi);
if(xposi==0 && tablepos == 0) return; // no move required
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
if(neg == 0)
{
	// Positive shift: build the scratch line with xposi+2 zero pixels of left
	// padding, the source pixels, then 16 zero pixels of right padding so the
	// SSE loop can read past the end safely.
	unsigned short *ptr = scanline;
	int nwidth = width-xposi+16;
	if(nwidth > width) nwidth = width;
	for(x=0;x<xposi+2;x++)
	{
		*ptr++ = 0;//r
		*ptr++ = 0;//g
		*ptr++ = 0;//b
		*ptr++ = 0;//a
	}
	memcpy(ptr, RGB48, (nwidth)*4*2);
	ptr += (nwidth)*4;
	for(x=0;x<16;x++)
	{
		*ptr++ = 0;//r
		*ptr++ = 0;//g
		*ptr++ = 0;//b
		*ptr++ = 0;//a
	}
}
else
{
	// Negative shift: start two pixels before the source window (zero-filled
	// where that would read before the line), then pad the tail with zeros.
	unsigned short *ptr = scanline;
	for(x=0;x<2;x++)
	{
		if(x+xposi-2>=0)
		{
			*ptr++ = RGB48[(x+xposi-2)*4];//r
			*ptr++ = RGB48[(x+xposi-2)*4+1];//g
			*ptr++ = RGB48[(x+xposi-2)*4+2];//b
			*ptr++ = RGB48[(x+xposi-2)*4+3];//a
		}
		else
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
	}
	memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2);
	ptr += (width-xposi)*4;
	for(x=0;x<xposi+16;x++)
	{
		*ptr++ = 0;//r
		*ptr++ = 0;//g
		*ptr++ = 0;//b
		*ptr++ = 0;//a
	}
}
// Broadcast the four tap gains across all eight 16-bit lanes.
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
line128 = (__m128i *)&scanline[0];
outline128 = (__m128i *)&RGB48[0];
//l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2,
//l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4,
//l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6,
//l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8,
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
	// 13-bit signed formats: use the data as-is.
	l1 = _mm_loadu_si128(line128++);
	l2 = _mm_loadu_si128(line128++);
	l3 = _mm_loadu_si128(line128++);
	shift = 0;
}
else
{
	// 16-bit unsigned input: pre-shift each load to 13-bit unsigned so the
	// signed 16-bit multiplies in the filter loop cannot overflow.
	l1 = _mm_loadu_si128(line128++);
	l1 = _mm_srli_epi16(l1,3);
	l2 = _mm_loadu_si128(line128++);
	l2 = _mm_srli_epi16(l2,3);
	l3 = _mm_loadu_si128(line128++);
	l3 = _mm_srli_epi16(l3,3);
shift = 3; } for(x=0;x<width*4; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0 //t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4 //t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4 //l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0 //t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0 //t2 >>= 4*16; //t2 = 0 0 0 0 r4,g4,b4,a4 //t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4 //l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_srli_si128(l2,4*2); t2 = _mm_slli_si128(t2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0 //t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5 //t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l3,4*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offsetR, float offsetG, float offsetB , int flipR, int flipG, int flipB) { float Rxposf,Rremainf; int Rxposi,Rtablepos; float Gxposf,Gremainf; int Gxposi,Gtablepos; float Bxposf,Bremainf; int Bxposi,Btablepos; int x; int 
RgainA,RgainB,RgainC,RgainD; int GgainA,GgainB,GgainC,GgainD; int BgainA,BgainB,BgainC,BgainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int negR = 0; int negG = 0; int negB = 0; int shift = 0; __m128i l1,l2,l3,o128,t1,t2; __m128i *line128, *outline128; __m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3; if(flipR) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipG) { unsigned short *ptrL = &RGB48[1]; unsigned short *ptrR = &RGB48[1]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipB) { unsigned short *ptrL = &RGB48[2]; unsigned short *ptrR = &RGB48[2]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(offsetR < 0.0) negR = 1; if(offsetG < 0.0) negG = 1; if(offsetB < 0.0) negB = 1; Rxposf = width * offsetR; Rxposi = (int)floorf(Rxposf); Rremainf = Rxposf - (float)Rxposi; Rtablepos = (int)(Rremainf*(float)SUBPIXEL); Gxposf = width * offsetG; Gxposi = (int)floorf(Gxposf); Gremainf = Gxposf - (float)Gxposi; Gtablepos = (int)(Gremainf*(float)SUBPIXEL); Bxposf = width * offsetB; Bxposi = (int)floorf(Bxposf); Bremainf = Bxposf - (float)Bxposi; Btablepos = (int)(Bremainf*(float)SUBPIXEL); Rxposi = abs(Rxposi); Gxposi = abs(Gxposi); Bxposi = abs(Bxposi); if(Rxposi==0 && Rtablepos == 0) return; // no move required RgainA = gains[Rtablepos][0]; RgainB = gains[Rtablepos][1]; RgainC = gains[Rtablepos][2]; RgainD = gains[Rtablepos][3]; GgainA = gains[Gtablepos][0]; GgainB = gains[Gtablepos][1]; GgainC = gains[Gtablepos][2]; GgainD = gains[Gtablepos][3]; BgainA = gains[Btablepos][0]; BgainB = gains[Btablepos][1]; BgainC = gains[Btablepos][2]; BgainD = gains[Btablepos][3]; if(negR == 0) { unsigned short *ptr = scanline; int nwidth = width-Rxposi+16; if(nwidth > 
width) nwidth = width; for(x=0;x<Rxposi+2;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } for(x=0;x<nwidth;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Rxposi-2>=0) { *ptr++ = RGB48[(x+Rxposi-2)*3];//r ptr++;//g ptr++;//b } else { *ptr++ = 0;//r ptr++;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Rxposi;x<width;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<Rxposi+16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } if(negG == 0) { unsigned short *ptr = scanline; int nwidth = width-Gxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Gxposi+2;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } for(x=0;x<nwidth;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Gxposi-2>=0) { ptr++;//r *ptr++ = RGB48[(x+Gxposi-2)*3+1];//g ptr++;//b } else { ptr++;//r *ptr++ = 0;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Gxposi;x<width;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<Gxposi+16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } if(negB == 0) { unsigned short *ptr = scanline; int nwidth = width-Bxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Bxposi+2;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } for(x=0;x<nwidth;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<16;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Bxposi-2>=0) { ptr++;//r ptr++;//g *ptr++ = RGB48[(x+Bxposi-2)*3+2];//b } else { ptr++;//r ptr++;//g *ptr++ = 0;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Bxposi;x<width;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<Bxposi+16;x++) { 
ptr++;//r ptr++;//g *ptr++ = 0;//b } } gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA); gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA); gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA); gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB); gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB); gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB); gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC); gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC); gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC); gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD); gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD); gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA1); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = 
_mm_mulhi_epi16(t1, gB1); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0 //t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0 //t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5 //t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5 //l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,3*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC1); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0 //t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0 //t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0 //t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,6*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); t2 = _mm_slli_si128(l3,7*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD1); o128 = _mm_adds_epi16(o128,t1); t1 = gA1; gA1 = gA2; gA2 = gA3; gA3 = t1; t1 = gB1; gB1 = gB2; gB2 = gB3; gB3 = t1; t1 = gC1; gC1 = gC2; gC2 = gC3; gC3 = t1; t1 = gD1; gD1 = gD2; gD2 = gD3; gD3 = t1; l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint) { int x,val,ypos=0,upos=1,vpos=3; int step = 1,pos=0; short *ssbase = (short *)sbase; uint32_t *lbase = (uint32_t *)sbase; ToolsHandle *tools = decoder->tools; int scaledvectorscope = 0; if(tools == NULL) return; if(whitepoint == 13) { if(format 
== DECODED_FORMAT_RG64) format = DECODED_FORMAT_W13A; else format = DECODED_FORMAT_WP13; } while(width/step > 360) { step*=2; } tools->waveformWidth = width/step; decoder->tools->blurUVdone = 0; switch(format & 0xffffff) { case DECODED_FORMAT_WP13: decoder->tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; R = ssbase[0]>>5; G = ssbase[1]>>5; B = ssbase[2]>>5; if(R > 255) R = 255; if(R < 0) R = 0; if(G > 255) G = 255; if(G < 0) G = 0; if(B > 255) B = 255; if(B < 0) B = 0; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; ssbase += step*3; } break; case DECODED_FORMAT_W13A: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; R = ssbase[0]>>5; G = ssbase[1]>>5; B = ssbase[2]>>5; if(R > 255) R = 255; if(R < 0) R = 0; if(G > 255) G = 255; if(G < 0) G = 0; if(B > 255) B = 255; if(B < 0) B = 0; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; ssbase += step*4; } break; case DECODED_FORMAT_RG48: 
tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; R = sbase[0]>>8; G = sbase[1]>>8; B = sbase[2]>>8; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; sbase += step*3; } break; case DECODED_FORMAT_AB10: case DECODED_FORMAT_RG30: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; val = lbase[x]; R = (val>>22)&0xff; G = (val>>12)&0xff; B = (val>>02)&0xff; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case DECODED_FORMAT_AR10: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; val = lbase[x]; B = (val>>22)&0xff; G = (val>>12)&0xff; R = (val>>02)&0xff; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 
* B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case DECODED_FORMAT_R210: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; val = SwapInt32BtoN(lbase[x]); R = (val>>22)&0xff; G = (val>>12)&0xff; B = (val>>02)&0xff; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case DECODED_FORMAT_DPX0: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; val = SwapInt32BtoN(lbase[x]); R = (val>>24)&0xff; G = (val>>14)&0xff; B = (val>>04)&0xff; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case DECODED_FORMAT_RG64: case DECODED_FORMAT_B64A: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int32_t R,G,B,U,V; R = sbase[1]>>8; G = sbase[2]>>8; B = sbase[3]>>8; tools->histR[R]++; 
tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; sbase += step*4; } break; case COLOR_FORMAT_UYVY: ypos=1,upos=0,vpos=2; case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109 case COLOR_FORMAT_YUYV: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int Y,U,V,R,G,B; uint8_t *bptr = (uint8_t *)sbase; bptr += x * 2; Y = bptr[ypos]-16; U = bptr[upos]-128; Y+= bptr[ypos+2]-16; Y>>=1; V = bptr[vpos]-128; R = (9535*Y + 14688*V)>>13; //13-bit white G = (9535*Y - 4375*V - 1745*U)>>13; B = (9535*Y + 17326*U)>>13; //TODO much -20 to 120 RGB range. 
if(R > 255) R = 255; if(R < 0) R = 0; if(G > 255) G = 255; if(G < 0) G = 0; if(B > 255) B = 255; if(B < 0) B = 0; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; if(scaledvectorscope) { U *= 255; U /= 314; V *= 255; V /= 244; } //* 255.0/314.0 //* 255.0/244.0 U += 128; V += 128; if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case COLOR_FORMAT_YU64: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int Y,U,V,R,G,B; uint8_t *bptr = (uint8_t *)sbase; bptr += x * 4; bptr++; //read only the high byte out of the 16-bit Y = bptr[0]-16; V = bptr[2]-128; Y+= bptr[4]-16; Y>>=1; U = bptr[6]-128; R = (9535*Y + 14688*V)>>13; //13-bit white G = (9535*Y - 4375*V - 1745*U)>>13; B = (9535*Y + 17326*U)>>13; if(R > 255) R = 255; if(R < 0) R = 0; if(G > 255) G = 255; if(G < 0) G = 0; if(B > 255) B = 255; if(B < 0) B = 0; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; if(scaledvectorscope) { U *= 255; U /= 314; V *= 255; V /= 244; } U += 128; V += 128; if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case COLOR_FORMAT_V210: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int Y,U,V,R,G,B; uint32_t *lptr = (uint32_t *)sbase; lptr += (x/6)*4; switch(x % 6) { case 0: V = ((*lptr>>02) & 0xff) - 128; Y = ((*lptr>>12) & 0xff) - 16; U = ((*lptr>>22) & 0xff) - 128; lptr++; Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1; break; case 1: lptr++; Y = ((*lptr>>02) & 0xff) - 16; V = ((*lptr>>12) & 0xff) - 128; Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1; lptr--; U = ((*lptr>>22) & 0xff) - 128; break; case 2: lptr++; Y = ((*lptr>>22) & 0xff) - 16; lptr++; U = ((*lptr>>02) & 0xff) - 128; Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1; V = ((*lptr>>22) & 0xff) - 128; break; case 3: lptr++; V = ((*lptr>>12) & 0xff) - 128; lptr++; U = ((*lptr>>02) & 0xff) 
- 128; Y = ((*lptr>>12) & 0xff) - 16; lptr++; Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1; break; case 4: lptr+=2; V = ((*lptr>>22) & 0xff) - 128; lptr++; Y = ((*lptr>>02) & 0xff) - 16; U = ((*lptr>>12) & 0xff) - 128; Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1; break; case 5: lptr+=2; V = ((*lptr>>22) & 0xff) - 128; lptr++; U = ((*lptr>>12) & 0xff) - 128; Y = ((*lptr>>22) & 0xff) - 16; lptr++; Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1; break; } R = (9535*Y + 14688*V)>>13; //13-bit white G = (9535*Y - 4375*V - 1745*U)>>13; B = (9535*Y + 17326*U)>>13; if(R > 255) R = 255; if(R < 0) R = 0; if(G > 255) G = 255; if(G < 0) G = 0; if(B > 255) B = 255; if(B < 0) B = 0; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; if(scaledvectorscope) { U *= 255; U /= 314; V *= 255; V /= 244; } U += 128; V += 128; if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case COLOR_FORMAT_RGB24: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int R,G,B,U,V; uint8_t *bptr = (uint8_t *)sbase; bptr += x * 3; R = bptr[2]; G = bptr[1]; B = bptr[0]; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case COLOR_FORMAT_RGB32: tools->histogram = 1; for(x=0,pos=0; x<width; x+=step,pos++) { int R,G,B,U,V; uint8_t *bptr = (uint8_t *)sbase; bptr += x * 4; R = bptr[2]; G = bptr[1]; B = bptr[0]; tools->histR[R]++; tools->histG[G]++; tools->histB[B]++; tools->waveR[pos][R]++; 
tools->waveG[pos][G]++; tools->waveB[pos][B]++; //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16; if(scaledvectorscope) { U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0 V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0 } else { U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128; V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128; } if(U<0) U=0; if(U>255) U=255; if(V<0) V=0; if(V>255) V=255; tools->scopeUV[U][V]++; } break; case COLOR_FORMAT_BYR2: case COLOR_FORMAT_BYR4: //do nothing break; default: assert(0); #if (0 && DEBUG) fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n"); #endif break; } } void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR) { #if 1 int x,RL,GL,BL,RR,GR,BR; int nRL,nGL,nBL; int nRR,nGR,nBR; int max = 1024*1024-1; unsigned short *sqrttable = decoder->sqrttable; ileakL>>=6; ileakR>>=6; if(sqrttable == NULL) return; for(x=0;x<width;x++) { RL = sbaseL[0]>>6; GL = sbaseL[1]>>6; //10-bit BL = sbaseL[2]>>6; RL*=RL; GL*=GL; //20-bit BL*=BL; RR = sbaseR[0]>>6; GR = sbaseR[1]>>6; //10-bit BR = sbaseR[2]>>6; RR*=RR; GR*=GR; //20-bit BR*=BR; nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL; nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL; nRL >>= 10; //20-bit nGL >>= 10; nBL >>= 10; if(nRL>max) nRL=max; if(nRL<0) nRL=0; if(nGL>max) nGL=max; if(nGL<0) nGL=0; if(nBL>max) nBL=max; if(nBL<0) nBL=0; if(sqrttable[nRL] == 65535) sqrttable[nRL] = (int)sqrt(nRL); if(sqrttable[nGL] == 65535) sqrttable[nGL] = (int)sqrt(nGL); if(sqrttable[nBL] == 65535) sqrttable[nBL] = (int)sqrt(nBL); sbaseL[0] = sqrttable[nRL]<<6; sbaseL[1] = sqrttable[nGL]<<6; sbaseL[2] = sqrttable[nBL]<<6; sbaseL += 3; nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR; nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR; nRR >>= 10; //20-bit nGR >>= 
10; nBR >>= 10; if(nRR>max) nRR=max; if(nRR<0) nRR=0; if(nGR>max) nGR=max; if(nGR<0) nGR=0; if(nBR>max) nBR=max; if(nBR<0) nBR=0; if(sqrttable[nRR] == 65535) sqrttable[nRR] = (int)sqrt(nRR); if(sqrttable[nGR] == 65535) sqrttable[nGR] = (int)sqrt(nGR); if(sqrttable[nBR] == 65535) sqrttable[nBR] = (int)sqrt(nBR); sbaseR[0] = sqrttable[nRR]<<6; sbaseR[1] = sqrttable[nGR]<<6; sbaseR[2] = sqrttable[nBR]<<6; sbaseR += 3; } #else // works and fast but has not image linearization, not as good __m128i *ptrL = (__m128i *)sbaseL; __m128i *ptrR = (__m128i *)sbaseR; __m128i t,L,R,nL,nR; int x,width8 = (width*3) & ~7; __m128i white_epi16 = _mm_set1_epi16(32767); __m128i leak_epi16 = _mm_set1_epi16(ileak>>1); __m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1)); for(x=0;x<width8;x+=8) { L = _mm_load_si128(ptrL); R = _mm_load_si128(ptrR); L = _mm_srli_epi16(L,1); //15-bit R = _mm_srli_epi16(R,1); //15-bit nL = _mm_mulhi_epi16(L, oneNegLeak_epi16); t = _mm_mulhi_epi16(white_epi16, leak_epi16); nL = _mm_adds_epi16(nL, t); t = _mm_mulhi_epi16(R, leak_epi16); nL = _mm_subs_epu16(nL, t); nR = _mm_mulhi_epi16(R, oneNegLeak_epi16); t = _mm_mulhi_epi16(white_epi16, leak_epi16); nR = _mm_adds_epi16(nR, t); t = _mm_mulhi_epi16(L, leak_epi16); nR = _mm_subs_epu16(nR, t); L = _mm_slli_epi16(nL,2); R = _mm_slli_epi16(nR,2); _mm_store_si128(ptrL++, L); _mm_store_si128(ptrR++, R); } #endif } void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR) { #if 1 int x,R,G,B; int nR,nG,nB; int max = 1024*1024-1; unsigned short *sqrttable = decoder->sqrttable; ileakL>>=6; ileakR>>=6; if(sqrttable == NULL) return; for(x=0;x<width;x++) { R = sbase[0]>>6; G = sbase[1]>>6; //10-bit B = sbase[2]>>6; R*=R; G*=G; //20-bit B*=B; nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit nG = G*(1023-ileakR) + ileakR*max - R*ileakR; nB = B*(1023-ileakR) + ileakR*max - R*ileakR; nR >>= 10; //20-bit nG >>= 10; nB >>= 10; if(nR>max) nR=max; if(nR<0) nR=0; if(nG>max) 
nG=max; if(nG<0) nG=0; if(nB>max) nB=max; if(nB<0) nB=0; if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR); if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG); if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB); sbase[0] = sqrttable[nR]<<6; sbase[1] = sqrttable[nG]<<6; sbase[2] = sqrttable[nB]<<6; sbase += 3; } #elif 0 int x; float R,G,B; float nR,nG,nB; float fleakL = (float)ileakL / 65535.0; float fleakR = (float)ileakR / 65535.0; for(x=0;x<width;x++) { R = sbase[0]; G = sbase[1]; B = sbase[2]; R /= 65535.0; G /= 65535.0; B /= 65535.0; R *= R; G *= G; B *= B; nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL; nG = G*(1.0-fleakR) + fleakR - R*fleakR; nB = B*(1.0-fleakR) + fleakR - R*fleakR; if(nR<0) nR=0; if(nG<0) nG=0; if(nB<0) nB=0; nR = sqrt(nR); nG = sqrt(nG); nB = sqrt(nB); sbase[0] = nR * 65535.0; sbase[1] = nG * 65535.0; sbase[2] = nB * 65535.0; sbase += 3; } #elif 0 __m128i RGBRGB,rgb_epi32,RGB1,RGB2; __m128i zero_epi128 = _mm_setzero_si128(); int x,width6 = (width*3) / 6 * 6; __m128 white_ps = _mm_set1_ps(1.0); __m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0)); __m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0); __m128 scale_ps = _mm_set1_ps(65535.0); __m128 scalehalf_ps = _mm_set1_ps(32767.0); __m128 zero_ps = _mm_set1_ps(0.0); __m128 rgb_ps, alt_rgb_ps; __m128i sub_epi32; __m128 sub_ps; for(x=0;x<width6;x+=6) // two RGB pairs { int R,G,B; RGBRGB = _mm_loadu_si128((__m128i *)sbase); R = _mm_extract_epi16(RGBRGB, 0); G = _mm_extract_epi16(RGBRGB, 1); B = _mm_extract_epi16(RGBRGB, 2); G+=B; G>>=1; sub_epi32 = _mm_set_epi32(G,R,R,G); sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0 sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0 sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128); rgb_ps = 
_mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0 rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0 rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL; rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL; sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;] rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;] rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0; rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt() rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767 RGB1 = _mm_cvtps_epi32(rgb_ps); RGB1 = _mm_packs_epi32 (RGB1, zero_epi128); RGB1 = _mm_slli_si128(RGB1, 10); RGB1 = _mm_srli_si128(RGB1, 10); RGBRGB = _mm_srli_si128(RGBRGB, 6); R = _mm_extract_epi16(RGBRGB, 0); G = _mm_extract_epi16(RGBRGB, 1); B = _mm_extract_epi16(RGBRGB, 2); G+=B; G>>=1; sub_epi32 = _mm_set_epi32(G,R,R,G); sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0 sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0 sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128); rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0 rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0 rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL; rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL; sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;] rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;] rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0; rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt() rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767 RGB2 = _mm_cvtps_epi32(rgb_ps); RGB2 = _mm_packs_epi32 (RGB2, zero_epi128); RGB2 = _mm_slli_si128(RGB2, 
6); RGB1 = _mm_adds_epi16(RGB1, RGB2); RGB1 = _mm_slli_epi16(RGB1, 1); RGB1 = _mm_slli_si128(RGB1, 4); RGB1 = _mm_srli_si128(RGB1, 4); RGBRGB = _mm_srli_si128(RGBRGB, 6); RGBRGB = _mm_slli_si128(RGBRGB, 12); RGBRGB = _mm_adds_epi16(RGB1, RGBRGB); _mm_storeu_si128((__m128i *)sbase, RGBRGB); sbase += 6; } #endif } void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR) { int x,R,G,B; int nR,nG,nB; int max = 1024*1024-1; unsigned short *sqrttable = decoder->sqrttable; ileakL>>=6; ileakR>>=6; if(sqrttable == NULL) return; for(x=0;x<width;x++) { R = sbase[0]>>6; G = sbase[1]>>6; //10-bit B = sbase[2]>>6; R*=R; G*=G; //20-bit B*=B; nR = R*(1023-ileakL) + ileakL*max - B*ileakL; nG = G*(1023-ileakL) + ileakL*max - B*ileakL; nB = B*(1023-ileakR) + ileakR*max - ((R+G)>>1)*ileakR; nR >>= 10; //20-bit nG >>= 10; nB >>= 10; if(nR>max) nR=max; if(nR<0) nR=0; if(nG>max) nG=max; if(nG<0) nG=0; if(nB>max) nB=max; if(nB<0) nB=0; if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR); if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG); if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB); sbase[0] = sqrttable[nR]<<6; sbase[1] = sqrttable[nG]<<6; sbase[2] = sqrttable[nB]<<6; sbase += 3; } } void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR) { int x,R,G,B; int nR,nG,nB; int max = 1024*1024-1; unsigned short *sqrttable = decoder->sqrttable; ileakL>>=6; ileakR>>=6; if(sqrttable == NULL) return; for(x=0;x<width;x++) { R = sbase[0]>>6; G = sbase[1]>>6; //10-bit B = sbase[2]>>6; R*=R; G*=G; //20-bit B*=B; nR = R*(1023-ileakL) + ileakL*max - G*ileakL; nG = G*(1023-ileakR) + ileakR*max - ((R+B)>>1)*ileakR; nB = B*(1023-ileakL) + ileakL*max - G*ileakL; nR >>= 10; //20-bit nG >>= 10; nB >>= 10; if(nR>max) nR=max; if(nR<0) nR=0; if(nG>max) nG=max; if(nG<0) nG=0; if(nB>max) nB=max; if(nB<0) nB=0; if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR); if(sqrttable[nG] == 65535) sqrttable[nG] = 
(int)sqrt(nG); if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB); sbase[0] = sqrttable[nR]<<6; sbase[1] = sqrttable[nG]<<6; sbase[2] = sqrttable[nB]<<6; sbase += 3; } } void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank) { uint16_t *scratchline,*scratchline2,*scratchline3; uint16_t *sptr; uint16_t *srclineA,*srclineB; uint16_t *dstlineA,*dstlineB; int x,y2; int width = decoder->frame.width; int height = decoder->frame.height; int skip = 3; int sskip = 3; uint8_t *bptr1; uint8_t *bptr2; uint8_t *baseptr1; uint8_t *baseptr2; float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL; float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR; float frameTilt = decoder->cfhddata.channel[0].FrameTilt; float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset; float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset; float rotOffset = decoder->cfhddata.channel[1].RotationOffset; float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset; float horizOffsetStep = 0; float horizOffsetStepR = 0; int flip1=0,flip2=0; int channel_flip = decoder->cfhddata.channel_flip; int source_pitch1 = source_pitch; int source_pitch2 = source_pitch; uint8_t *outputline = output+y*pitch; uint8_t *outputline2 = NULL; float horizOffsetBase; float rotOffsetBase; float horizOffsetBaseR; float rotOffsetBaseR; int formatdone = 0; float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX; float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX; //float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY; float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY; float zoom; float zoomR; float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom; float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom; float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom; float frameDiffZoom1 = 
decoder->cfhddata.channel[1].FrameDiffZoom; float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom; float frameHDynamic = decoder->cfhddata.FrameHDynamic; float frameHDynCenter = decoder->cfhddata.FrameHDynCenter; float frameHDynWidth = decoder->cfhddata.FrameHDynWidth; float frameHScale = decoder->cfhddata.FrameHScale; int alphachannel = 0; int whitepoint = 16; float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen; float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen; float vignette = decoder->cfhddata.channel[0].user_vignette_start; int flip_LR = 0; float vig_r1; float vig_r2; float vig_gain; if(blank) // blankline, no shifts required { windowMaskL = 0; windowMaskR = 0; frameTilt = 0; horizOffset = 0; horizOffsetR = 0; rotOffset = 0; rotOffsetR = 0; frameZoom1 = 1.0; frameZoom2 = 1.0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; frameHScale = 1.0; frameHDynamic = 1.0; frameHDynCenter = 0.5; frameHDynWidth = 0.0; } if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_RGB32) alphachannel = 1; if(xmax == 0.0) xmax = 1.0; if(ymax == 0.0) ymax = 1.0; if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { width *= 2; } if(decoder->source_channels < 2) // 2D { channel_flip &= 0x3; channel_flip |= channel_flip<<2; decoder->cfhddata.channel_flip = channel_flip; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER || decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED) { blursharpenL = 0.0; blursharpenR = 0.0; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION)) { horizOffset = rotOffset = 0; horizOffsetR = rotOffsetR = 0; frameTilt = 0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; } 
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS)) { channel_flip = 0; } if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { horizOffset += decoder->cfhddata.FrameOffsetX; horizOffsetR -= decoder->cfhddata.FrameOffsetX; frameZoom1 += frameHScale - 1.0f; frameZoom2 += frameHScale - 1.0f; if(frameHDynamic != 1.0) { frameZoom1 += 0.00001f; frameZoom2 += 0.00001f; } if(vignette != 0.0) { float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width)); vig_r1 = (vignette+1.0f); vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f); vig_gain = decoder->cfhddata.channel[0].user_vignette_gain; vig_r1 *= vig_diag; vig_r2 *= vig_diag; } } else { frameZoom1 = 1.0f; frameZoom2 = 1.0f; vignette = 0; } zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1; if(frameDiffZoom2 != 0.0) zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2; else zoomR = 0.0; if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { if(decoder->cfhddata.InvertOffset) { rotOffset = -rotOffset; rotOffsetR = -rotOffsetR; rotOffset -= decoder->cfhddata.FrameOffsetR; rotOffsetR -= -decoder->cfhddata.FrameOffsetR; } else { rotOffset += decoder->cfhddata.FrameOffsetR; rotOffsetR += -decoder->cfhddata.FrameOffsetR; } } rotOffsetBase = rotOffset; horizOffsetBase = horizOffset; rotOffsetBaseR = rotOffsetR; horizOffsetBaseR = horizOffsetR; horizOffset -= rotOffset * 0.5f; horizOffsetStep = rotOffset / (float)height; horizOffsetR -= rotOffsetR * 0.5f; horizOffsetStepR = rotOffsetR / (float)height; horizOffset += horizOffsetStep * y; horizOffsetR += horizOffsetStepR * y; assert(bufferremain >= width * 8 * 2 * 2); baseptr1 = source_buffer; baseptr2 = source_buffer + channel_offset; if(channel_flip & 0xf) { if(channel_flip & 1) { flip1 = 1; } if(channel_flip & 4) { flip2 = 1; } } if(source_pitch1 < 0) flip_LR = 1; decoder->sharpen_flip = 0; if(channel_flip & 2) //ProcessLine3D { 
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { } else { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } } if(channel_flip & 8) { if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } else { baseptr2 += source_pitch2*(height-1); source_pitch2 = -source_pitch2; } } bptr1 = baseptr1 + y*source_pitch1; bptr2 = baseptr2 + y*source_pitch2; y2 = y; if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView { if(y2 < height/4) { blank = 1; y2 = 0; } else { y2 -= height/4; y2 *= 2; if(y2 >= height-1) { blank = 1; y2 = height - 2; } } bptr1 = baseptr1 + y2*source_pitch1; bptr2 = baseptr2 + y2*source_pitch2; } srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ; if(alphachannel) { scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ; } dstlineA = sptr = scratchline; dstlineB = scratchline3; switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RG64: whitepoint = 16; skip = 8; sskip = 4; break; case DECODED_FORMAT_W13A: whitepoint = 13; skip = 8; sskip = 4; break; case DECODED_FORMAT_WP13: whitepoint = 13; skip = 6; sskip = 3; break; case DECODED_FORMAT_RG48: skip = 6; sskip = 3; break; case DECODED_FORMAT_RGB32: skip = 4; break; case DECODED_FORMAT_RGB24: skip = 3; break; case DECODED_FORMAT_YUYV: skip = 2; break; } if(blank) { if(srclineA) memset(srclineA, 0, width*skip); if(srclineB && decoder->channel_decodes > 1) 
memset(srclineB, 0, width*skip); } if(blursharpenL != 0.0 || blursharpenR != 0.0) { if(decoder->channel_blend_type == BLEND_FREEVIEW || decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED ) { decoder->doVerticalFilter = 0; } else { decoder->doVerticalFilter = 1; } } { switch(decoder->channel_blend_type) { case BLEND_FREEVIEW: case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side if(!blank) { if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { dstlineA = srclineA; sptr = dstlineA; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, 
decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2); } else { int16_t *ptr; int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(!alphachannel) { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, 
vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); dstlineA = srclineA; ptr = (int16_t *)srclineA; for(x=0; x<width/2; x++) { *ptr++ = (ptr1[0]+ptr1[3])>>1; *ptr++ = (ptr1[1]+ptr1[4])>>1; *ptr++ = (ptr1[2]+ptr1[5])>>1 ; ptr1+=sskip*2; } for(; x<width; x++) { *ptr++ = (ptr2[0]+ptr2[3])>>1; *ptr++ = (ptr2[1]+ptr2[4])>>1; *ptr++ = (ptr2[2]+ptr2[5])>>1; ptr2+=sskip*2; } } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, dstlineA, width/2, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2); memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2); memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2); } } break; case BLEND_STACKED_ANAMORPHIC: //stacked case BLEND_LINE_INTERLEAVED: //fields if((y & 1) == 1) return; if(!blank) { uint16_t *ptrA1 = (uint16_t *)srclineA; uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1); uint16_t *ptrB1 = (uint16_t *)srclineB; uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1); FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip); FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip); if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, 
flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->doVerticalFilter == 0) { if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline2 = output+(y>>1)*pitch; outputline = output+((y>>1)+(height/2))*pitch; } else { outputline = output+(y>>1)*pitch; outputline2 = output+((y>>1)+(height/2))*pitch; } } else //fields { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline = output+(y)*pitch; outputline2 = output+(y+1)*pitch; } else { outputline2 = output+(y)*pitch; outputline = output+(y+1)*pitch; } } if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right { uint8_t *tmp = outputline2; outputline2 = outputline; outputline = tmp; } } else { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2, srclineA, width*skip); memcpy(srclineA, srclineB, width*skip); memcpy(srclineB, scratchline2, width*skip); } } } break; case BLEND_ONION: //onion case BLEND_DIFFERENCE: //difference case BLEND_SPLITVIEW: //splitView if(!blank) { //dstlineA = source_buffer; //dstlineA += (source_pitch>>1) * y; sptr = dstlineA = srclineA; srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, 
-horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } x = 0; if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view { int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255; for(x = xsplit*sskip; x<width*sskip; x++) { srclineA[x] = srclineB[x]; } } else if(decoder->channel_blend_type == BLEND_ONION) //onion { FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip); } else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference { #if XMMOPT int width8 = (width*sskip) & 0xfff8; __m128i mid_epi16; //int unaligned = ((int)sbase) & 15; //unaligned += ((int)in_rgb8) & 15; if(whitepoint == 13) mid_epi16 = _mm_set1_epi16(0x0fff); else mid_epi16 = _mm_set1_epi16(0x1fff); for(x=0; x<width8; x+=8) { __m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]); __m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]); // 0 to 0xffff if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff } else { rgb16A = _mm_subs_epi16(rgb16A, rgb16B); } rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff _mm_store_si128((__m128i *)&dstlineA[x], rgb16A); } #endif for(; x<width*sskip; x++) { int val; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { val = (srclineB[x] - srclineA[x]) + 32768; } else { val = (srclineA[x] - srclineB[x]) + 32768; } if(val > 0x7fff) val = 0x7fff; if(val < 0) val = 0; dstlineA[x] = val; } } } break; case BLEND_ANAGLYPH_RC: case BLEND_ANAGLYPH_RC_BW: case BLEND_ANAGLYPH_AB: case BLEND_ANAGLYPH_AB_BW: case BLEND_ANAGLYPH_GM: case BLEND_ANAGLYPH_GM_BW: case BLEND_ANAGLYPH_DUBOIS: //Optimized { uint16_t *sptr1 = scratchline2; uint16_t *sptr2 = scratchline3; dstlineA = (uint16_t *)bptr1; // dstlineA += (source_pitch>>1) * y; sptr = dstlineA; sptr1 = 
srclineA = (uint16_t *)bptr1; sptr2 = srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1); RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(windowMaskL || xmin) { float mask = 
windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { uint16_t *tmp = srclineA; srclineA = srclineB; srclineB = tmp; } switch(decoder->channel_blend_type) { case BLEND_ANAGLYPH_RC: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_RC_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { 
for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_DUBOIS: //Optimized { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; int r,g,b; for(x=0; x<width; x++) { r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000; g =(ptr1[0]*-40 + 
ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000; b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000; if(r<0) r=0; if(r>0x3fff) r=0x3fff; if(g<0) g=0; if(g>0x3fff) g=0x3fff; if(b<0) b=0; if(b>0x3fff) b=0x3fff; sptr[0] = r; sptr[1] = g; sptr[2] = b; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } break; } } break; case BLEND_NONE: default: if(decoder->channel_decodes == 1) // only one channel { if(skip == 8) { //the data is already in the correct format sptr = (unsigned short *)bptr1; // shift if needed. if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else if(skip == 6) { //the data is already in the correct format dstlineA = sptr = (unsigned short *)srclineA; // shift if needed. 
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); } if(decoder->channel_current == 0) { if(blursharpenL != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); } } else { if(blursharpenR != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip); } } } if ((windowMaskL && decoder->channel_current == 0) || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; if(decoder->channel_current != 0) mask = xmin; if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); RGB48WindowMask(decoder, srclineA, width, 0, mask); } if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); if(decoder->channel_current != 1) mask = (1.0f-xmax); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR); RGB48WindowMask(decoder, srclineA, width, 1, mask); } } else { outputline2 = output+(y+height)*pitch; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); else RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } } break; } } if(!formatdone) { int flags = ACTIVEMETADATA_PRESATURATED; int whitebitdepth = 16; if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { flags = 0; whitebitdepth = 13; } if(outputline2) { // if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); } } else { //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) //{ // if(alphachannel) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth); // else // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); //} if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, 
decoder->frame.format, whitebitdepth, flags);
            else
                ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch,
                    decoder->frame.format, whitebitdepth, flags);
        }
    }
}

// Vertical sharpen/blur pass for one output row of the decoded frame,
// followed (in the continuation of this function) by conversion of the row
// from the intermediate stereo buffer format to the final output format.
// buffer       - shared scratch area; each thread uses its own slice
// output/pitch - destination frame and row stride in bytes
// local_output/local_pitch - intermediate (stereo) buffer and its row stride
// channel_offset - byte offset to the second channel inside the local buffer
// y            - row index; thread_index selects the per-thread scratch line
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch,
    uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
    uint16_t *sbase;//*sbase2 = NULL;
    int width = decoder->frame.width;
    int height = decoder->frame.height;
    int skip = 3;
    //int flip1=0;//flip2=0;
    int channel_flip = decoder->cfhddata.channel_flip;
    //int local_pitch1 = local_pitch;
    //int local_pitch2 = local_pitch;
    uint8_t *outputline = output+y*pitch;
    //uint8_t *outputline2 = NULL;
    short *scratch;
    //int formatdone = 0;
    //float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
    //float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
    //float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
    //float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
    int alphachannel = 0;
    float blursharpen = 0;
    int line_max = decoder->frame.height;
    int yy = y;

    // Per-channel vertical sharpen amount from the clip metadata.
    if(decoder->channel_current == 0)
        blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
    else
        blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

    // Sharpening is disabled when the color-matrix processing path is off or
    // the decode resolution is one of the reduced/quarter modes.
    if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
        decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
        decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
        decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
    {
        blursharpen = 0.0;
    }

    if(decoder->channel_mix_half_res == 1)
        line_max *= 2;

    // Image flips are honored only when that processing path is enabled.
    if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
    {
        channel_flip = 0;
    }

    // Vertical flip: write this row at the mirrored output position.
    if(decoder->sharpen_flip) //SharpenLine
    {
        //if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
        {
            yy = (line_max - 1 - y);
            outputline = output+yy*pitch;
        }
    }

    // Formats with a fourth (alpha) component use the 4444 conversion path.
    if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
        decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
        decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
        alphachannel = 1;

    if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
    {
        width *= 2;
    }

    // Point sbase at row y of the intermediate (local) buffer.
    sbase = (uint16_t *)local_output;
    sbase += (local_pitch>>1) * y;

    // Bytes per pixel of the intermediate buffer format.
    switch(decoder->StereoBufferFormat)
    {
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_W13A:
        skip = 8;
        break;
    case DECODED_FORMAT_WP13:
        skip = 6;
        break;
    case DECODED_FORMAT_RG48:
        skip = 6;
        break;
    case DECODED_FORMAT_RGB32:
        skip = 4;
        break;
    case DECODED_FORMAT_RGB24:
        skip = 3;
        break;
    case DECODED_FORMAT_YUYV:
        skip = 2;
        break;
    }

    // Per-thread scratch line inside the shared buffer.
    scratch = (short*)(buffer + width * skip * thread_index);

    {
        int flags = ACTIVEMETADATA_PRESATURATED;
        int whitebitdepth = 16;

        // Only the 13-bit whitepoint formats run the vertical filter below.
        if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 ||
            decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
        {
            int use_pitch = local_pitch;
            int edgeclose = 0;
            flags = 0;
            whitebitdepth = 13;

            if(blursharpen != 0.0 && local_pitch != 0)
            {
                // Five source rows feed the vertical filter (A,B above,
                // C current, D,E below); rows are clamped at frame edges and
                // edgeclose flags rows near the top/bottom borders.
                short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;

                switch(decoder->channel_blend_type)
                {
                case BLEND_STACKED_ANAMORPHIC:
                    // Step by two buffer lines per output line (use_pitch
                    // is doubled below).
                    sbase = (uint16_t *)local_output;
                    sbase += (local_pitch>>1) * y * 2;
                    if(y<=4) edgeclose = 1;
                    if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4;
                    else Aptr = (short *)sbase;
                    if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2;
                    else Dptr = (short *)sbase;
                    if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4;
                    else Eptr = (short *)sbase;
                    if(y>=height-4) edgeclose = 1;
                    use_pitch = local_pitch * 2;
                    break;

                case BLEND_LINE_INTERLEAVED:
                    // Odd rows map back to the preceding even row; even rows
                    // are offset into the other channel's half of the buffer.
                    sbase = (uint16_t *)local_output;
                    if(y & 1)
                    {
                        y--;
                        sbase += (local_pitch>>1) * y;
                    }
                    else
                    {
                        sbase += (local_pitch>>1) * y;
                        sbase += channel_offset>>1;
                    }
                    if(y<=8) edgeclose = 1;
                    if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4;
                    else Aptr = (short *)sbase;
                    if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2;
                    else Dptr = (short *)sbase;
                    if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4;
                    else Eptr = (short *)sbase;
                    if(y>=height-8) edgeclose = 1;
                    use_pitch = local_pitch * 2;
                    break;

                default:
                    // Progressive layout: neighboring buffer rows are adjacent.
                    if(y<=4) edgeclose = 1;
                    if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2;
                    else Aptr = (short *)sbase;
                    if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1;
                    else Dptr = (short *)sbase;
                    if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2;
                    else Eptr = (short *)sbase;
                    if(y>=height-4) edgeclose = 1;
                    use_pitch = local_pitch;
                    break;
                }

                // skip == 8 is the four-component (W13A) layout per the
                // format switch above.
                if(skip == 8)
                {
                    FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
                        scratch, width, blursharpen, decoder->frame.resolution, decoder->channel_blend_type);
                }
                else
                {
                    FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
                        scratch, width, blursharpen, decoder->frame.resolution, decoder->channel_blend_type);
                }
                // The filtered row now lives in the per-thread scratch line.
                sbase = (uint16_t *)scratch;
            }
        }

        // Convert the (possibly filtered) row to the output frame format.
        if(alphachannel)
            Convert4444LinesToOutput(decoder, width, 1, y, sbase, outputline, pitch,
                decoder->frame.format, whitebitdepth, flags);
        else
            ConvertLinesToOutput(decoder, width, 1, y, sbase, outputline, pitch,
                decoder->frame.format, whitebitdepth, flags);
    }
}

#if _GRAPHICS

// Draw burn-in overlays and analysis tools (histogram, waveform, vectorscope,
// grid, safe markers) onto the decoded frame, per cfhddata.BurninFlags and
// cfhddata.ComputeFlags.
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int x,y,v,width, height;
    int maxR=0,maxG=0,maxB=0;

    width = decoder->frame.width;
    height = decoder->frame.height;

    // Nothing requested: no burn-ins to draw.
    if(decoder->cfhddata.BurninFlags == 0)
        return;

    if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
    {
        // Lazily allocate the tools state used by histogram/scope drawing.
        if(decoder->tools == NULL)
        {
#if _ALLOCATOR
            decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
            decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
            if(decoder->tools)
            {
                memset(decoder->tools,
                    0, sizeof(ToolsHandle));
            }
            else
            {
                // Allocation failed: skip all burn-in drawing.
                return;
            }
        }
    }

    decoder->frame.output_format = output_format;

#if _THREADED && 1
    if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits;

#if _DELAY_THREAD_START
        if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);

            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                WorkerThreadProc,
                decoder);
        }
#endif
        {
            int avgR=0,avgG=0,avgB=0;

            // Post a message to the mailbox
            mailbox->output = output;

            // Subsample tall frames so the histogram pass reads fewer rows.
            if(height >= 1080)
            {
                mailbox->pitch = pitch*4; // only read every 4th scan line
                workunits = height/4; // only read every 4th scan line
            }
            else if(height >= 540)
            {
                mailbox->pitch = pitch*2; // only read every 2nd scan line
                workunits = height/2; // only read every 2nd scan line
            }
            else
            {
                mailbox->pitch = pitch; // read every scan line
                workunits = height; // read every scan line
            }

            if(decoder->tools->histogram == 0)
            {
                mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }

            // Average the histogram bins; 3x the mean is used as the display
            // scaling maximum for each channel (see assignments below).
            for(x=0;x<256;x++)
            {
                avgR += decoder->tools->histR[x];
                avgG += decoder->tools->histG[x];
                avgB += decoder->tools->histB[x];
                //if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
                //if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
                //if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
            }
            avgR /= 256;
            avgG /= 256;
            avgB /= 256;
            //maxR++;
            //maxG++;
            //maxB++;
            decoder->tools->maxR = avgR*3;//maxR;
            decoder->tools->maxG = avgG*3;//maxG;
            decoder->tools->maxB = avgB*3;//maxB;
        }
    }
#endif

    if(decoder->cfhddata.BurninFlags && DrawOpen(decoder))
    {
        if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
        {
#if _THREADED
            //DrawInit(decoder);
            //DrawStartThreaded(decoder);
            // If a draw thread pool exists, the metadata objects were drawn
            // asynchronously; just wait for completion here.
            if(decoder->draw_thread.pool.thread_count > 0)
            {
                DrawWaitThreaded(decoder);
            }
            else
#endif
            {
                DrawInit(decoder);
                DrawMetadataObjects(decoder);
            }
        }
        else
        {
            DrawInit(decoder);
        }

        if(decoder->drawSafeMarkers)
            DrawSafeMarkers(decoder);

        if(decoder->cfhddata.BurninFlags & 2) // tools
        {
            if(decoder->tools)
            {
                // ComputeFlags bits select which analysis tools to render.
                if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16)
                    DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/);
                if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2)
                    DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/);
                if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4)
                    DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/);
                if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8)
                    DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/);
            }
        }

        DrawScreen(decoder, output, pitch, output_format);
    }

// Disabled legacy histogram/burn-in path, kept for reference.
#if 0
#if _THREADED && 1
    if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits;
        int targetW, targetH;

        if(width < 256 || height < 256)
            return;

        targetW = width / 4;
        targetH = height / 8;

        mailbox->output = output;
        mailbox->pitch = pitch;
        workunits = targetW;
        mailbox->jobType = JOB_TYPE_BURNINS; // burnin

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    if(decoder->histogram == 0)
    {
        for(y=0; y<height; y+=4)
        {
            uint8_t *bptr = output;
            bptr += pitch * y;
            HistogramLine(decoder, (unsigned
short *)bptr, width, output_format);
				// HistogramLine leaves decoder->histogram NULL when it cannot
				// analyze this output format -- nothing more to render.
				if(decoder->histogram == 0)
					return; // don't know how to create Histogram for that format
			}
		}

		// Scan bins 1..254 for the per-channel peak (extremes excluded so
		// clipped black/white levels do not dominate the scale).
		for(x=1;x<255;x++)
		{
			if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
			if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
			if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
		}
		// +1 guards against a zero peak when the histogram is rendered.
		maxR++; maxG++; maxB++;
		decoder->maxR = maxR;
		decoder->maxG = maxG;
		decoder->maxB = maxB;

		// Render each histogram column into the output frame.
		for(x=0; x<targetW; x++)
		{
			HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
		}
#endif
#endif

	// Reset the tools scratch state for the next frame.
	if(decoder->tools)
		memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif

// Warp-mesh cache allocator (defined in the warp library); must be called
// before the threaded JOB_TYPE_WARP_CACHE job can run.
extern int geomesh_alloc_cache(void *gm);

// Degree/radian conversion helpers.
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)

// Coarse integer equality used to classify a frame's aspect ratio.
// Both values are right-shifted (more for larger frames, keyed off y) so
// dimensions within roughly one shifted step of each other compare equal.
bool approx_equal(int x, int y)
{
	// Shift amount grows with the magnitude of y (the frame dimension).
	if(y > 1080)
	{
		x >>= 6;
		y >>= 6;
	}
	else if(y > 540)
	{
		x >>= 5;
		y >>= 5;
	}
	else
	{
		x >>= 4;
		y >>= 4;
	}

	// Equal after shifting, or off by one step, counts as "approximately equal".
	if(x == y || x+1 == y || x == y+1)
		return true;

	return false;
}

// Returns true when y is within +/-1% of x.
// NOTE(review): the comparison range assumes x > 0 (callers pass aspect
// ratios); for x <= 0 the range is degenerate and equal values return
// false -- confirm callers never pass non-positive x.
bool approx_equal_float(float x, float y)
{
	if (x*0.99 < y && y < x*1.01)
		return true;
	return false;
}

#if WARPSTUFF

// Applies the configured lens/warp correction to the decoded frame in the
// output buffer.  A warp mesh is (re)built only when a lens parameter has
// changed since the previous frame; otherwise the cached mesh is reused.
// The warp itself runs on the worker thread pool into lens_correct_buffer,
// which is then copied back over output.
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int width, height;
	//int maxR = 0, maxG = 0, maxB = 0;
	int status = WARPLIB_SUCCESS;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	int backgroundfill = cfhddata->lensFill;
	float sensorcrop = 1.0;
	float phi, theta, rho;
	int srcLens = HERO4;

	if (!cfhddata->doMesh)
		return;

	// Rebuild the mesh only when any lens parameter changed since last frame.
	if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
		decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
		decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
		decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
		decoder->lastLensZoom != cfhddata->LensZoom ||
		decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
		decoder->lastLensGoPro != cfhddata->lensGoPro ||
		decoder->lastLensSphere != cfhddata->lensSphere ||
		decoder->lastLensFill != cfhddata->lensFill ||
		decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
		memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
	{
		// A lens parameter changed: discard the old mesh and rebuild.
		if (decoder->mesh)
			geomesh_destroy(decoder->mesh);

		width = decoder->frame.width;
		height = decoder->frame.height;

		// Classify the frame aspect ratio to pick the source lens model,
		// sensor crop and mesh density.
		if (approx_equal(width, height * 2)) // approx. 2:1
		{
			float outputaspect = 16.0f/9.0f;
			srcLens = EQUIRECT;
			sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.

			if (cfhddata->lensCustomSRC[1])
			{
				outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
				if (outputaspect >= 1.0f && outputaspect <= 3.0f)
				{
					//float sourceratio = (float)width / (float)height;
					if (approx_equal_float(outputaspect, 4.0f / 3.0f))
						sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
					if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
						sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
				}
			}

			// Mesh density scales with frame width.
			if (width >= 2496)
				decoder->mesh = geomesh_create(199, 99);
			else if (width >= 1272)
				decoder->mesh = geomesh_create(99, 49);
			else
				decoder->mesh = geomesh_create(49, 25);

			phi = cfhddata->LensOffsetX * DEG2RAD(720.0f);   // +-180deg HFOV for 2:1
			theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else if (approx_equal(width * 3, height * 4)) // approx. 4:3
		{
			srcLens = HERO4;
			sensorcrop = 1.0;
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);

			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f);  // +-60deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else //if(approx_equal(width*9,height*16)) // approx. 16:9
		{
			srcLens = HERO4;
			sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);

			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f);  // +-60.1deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}

		// Map the decoder output pixel format onto the warp-library format.
		if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
		else
			assert(0); // unsupported output format for warping

		if (cfhddata->lensSphere == 1)
		{
			if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
			{
				if (cfhddata->LensOffsetR != 0.0)
				{
					//float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					if (cfhddata->LensOffsetR < 0.0) angle = -angle;
					geomesh_transform_rotate(decoder->mesh, angle);
				}
				if (cfhddata->LensZoom != 1.0)
					geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

				if (cfhddata->LensFishFOV != 0.0) // DeFish
				{
					// Clamp the defish field of view just below +/-90 degrees.
					float fov = cfhddata->LensFishFOV;// *180.0;
					if (fov > 89.9f) fov = 89.9f;
					if (fov < -89.9f) fov = -89.9f;
					if (fov)
						status |= geomesh_transform_defish(decoder->mesh, fov);
				}
			}

			// Repoint from the source lens model to the requested output model.
			switch (cfhddata->lensGoPro)
			{
			case 0:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR);
				break;
			case 1:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4);
				break;
			case 2:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT);
				break;
			case 4:
				geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
				if (srcLens == EQUIRECT)
					geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
				else
					geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
				break;
			}
		}
		else // old boring geometry
		{
			if (cfhddata->LensZoom != 1.0)
				geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

			// basic orthographic moves
			if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
				geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);
			if (cfhddata->LensOffsetR != 0.0)
			{
				float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
				geomesh_transform_rotate(decoder->mesh, angle);
			}
			if (cfhddata->lensGoPro == 0) //Rectilear
				status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);
			//status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
			//		width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
		}

		geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

		if (status == WARPLIB_SUCCESS)
		{
			// Allocate the intermediate buffer the warp renders into.
			if (decoder->lens_correct_buffer == NULL)
			{
#if _ALLOCATOR
				decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
				decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
			}
		}
		else
		{
			// A geomesh call failed: skip warping entirely this frame.
			return;
		}

		/* need resources?
		{
			if(decoder->tools == NULL)
			{
#if _ALLOCATOR
				decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
				decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
				if(decoder->tools)
				{
					memset(decoder->tools, 0, sizeof(ToolsHandle));
				}
				else
				{
					return;
				}
			}
		}
		*/

#if _THREADED && 1
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
			int workunits = decoder->frame.height;

#if _DELAY_THREAD_START
			if (decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);

				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								decoder->thread_cntrl.capabilities >> 16,
								WorkerThreadProc,
								decoder);
			}
#endif
			{
				// Post a message to the mailbox: precompute the warp cache
				// for the rebuilt mesh, chunked over rows.
				mailbox->data = decoder->mesh;
				mailbox->output = output;
				mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
				mailbox->line_max = decoder->frame.height;
				mailbox->chunk_size = 16;
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
				mailbox->jobType = JOB_TYPE_WARP_CACHE;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}
#endif

		//decoder->frame.output_format = output_format;

		// Remember the parameters used so the mesh is reused next frame.
		decoder->lastLensOffsetX = cfhddata->LensOffsetX;
		decoder->lastLensOffsetY = cfhddata->LensOffsetY;
		decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
		decoder->lastLensOffsetR = cfhddata->LensOffsetR;
		decoder->lastLensZoom = cfhddata->LensZoom;
		decoder->lastLensFishFOV = cfhddata->LensFishFOV;
		decoder->lastLensGoPro = cfhddata->lensGoPro;
		decoder->lastLensSphere = cfhddata->lensSphere;
		decoder->lastLensFill = cfhddata->lensFill;
		decoder->lastLensStyleSel = cfhddata->lensStyleSel;
		memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
		memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
	}

#if _THREADED && 1
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits = decoder->frame.height;

		// Warp the decoded frame into lens_correct_buffer using the cached mesh.
		mailbox->data = decoder->mesh;
		mailbox->output = output;
		mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
		mailbox->line_max = decoder->frame.height;
		mailbox->chunk_size = 16;
		workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
		mailbox->jobType = JOB_TYPE_WARP;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		if(backgroundfill) // may need to blur the filled in areas
		{
			// Second pass: vertical blur of the background-filled regions,
			// chunked over columns (line_max is the frame width here).
			mailbox->data = decoder->mesh;
			mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->line_max = decoder->frame.width;
			mailbox->chunk_size = 16;
			mailbox->pitch = pitch;
			workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_WARP_BLURV;

			// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else // not threading
	{
		//geomesh_cache_init_bilinear(decoder->mesh); //bad
		geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
		geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
	}
#endif

	// Copy the warped image back over the caller's output frame.
	memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

	/*
	if(lens_correct_buffer)
#if _ALLOCATOR
		Free(decoder->allocator, lens_correct_buffer);
#else
		MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
	geomesh_destroy(mesh);
	*/
}

// Blanks the regions of the output frame outside the active lens rectangle
// (LensXmin/Xmax/Ymin/Ymax given as fractions of the frame).  RGB formats
// are cleared to zero; 8-bit packed YUV formats are filled with a
// format-specific black byte pair.
// NOTE(review): for COLOR_FORMAT_YU64 bitsize becomes 16 and no fill loop
// exists, so no masking is applied -- confirm whether 16-bit masking is
// intentionally unimplemented.
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x, y, width, height;
	int minY, maxY;
	int minX, maxX;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	uint8_t *line = output;
	uint32_t fillA = 0;
	uint32_t fillB = 0;
	int bitsize = 8;

	if (!cfhddata->doMesh)
		return;

	width = decoder->frame.width;
	height = decoder->frame.height;

	// No mask configured, or the mask covers the whole frame: nothing to do.
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0)
		return;
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0)
		return;

	// Fractional bounds to pixel rows / byte columns (X snapped down to a
	// multiple of 4 bytes).
	minY = (int)(decoder->cfhddata.LensYmin*(float)height);
	maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
	minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
	maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

	if (FORMATRGB(output_format))
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				memset(line, 0, minX);
				memset(line + maxX, 0, pitch - maxX);
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
	}
	else
	{
		// Byte pair (fillA, fillB) encoding black for the packed YUV format.
		switch (output_format & 0x7fffffff)
		{
		case COLOR_FORMAT_YVYU:
		case COLOR_FORMAT_YUYV:
			fillA = 0x10; fillB = 0x80;
			break;
		case COLOR_FORMAT_UYVY:
		case COLOR_FORMAT_2VUY:
			fillA = 0x80; fillB = 0x10;
			break;
		case COLOR_FORMAT_YU64:
			fillA = 0x8000; fillB = 0x1000;
			bitsize = 16;
			break;
		}
	}

	// 8-bit fill pass (runs for RGB too, where fillA == fillB == 0 and the
	// rows were already zeroed above).
	if (bitsize == 8)
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				for (x = 0; x < minX; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				for (x = maxX; x < pitch; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
	}
}
#endif //#if WARPSTUFF

// Converts the locally decoded frame(s) into the caller's output buffer,
// applying stereo/3D orientation and framing corrections (vertical offset,
// rotation, keystone, zoom, flips) via the worker thread pool.
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
	uint8_t *local_output_double = local_output;
	//Frame_Region emptyFrameMask = {0};

	// Decode into the stereo buffer when one is active.
	if(decoder->StereoBuffer)
		local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;

	if(channel_offset < 0) // channel swapped
	{
		channel_offset = -channel_offset;
	}

	// If local and output formats disagree on vertical orientation, walk
	// the local buffer bottom-up with a negated pitch.
	if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
	{
		local_output += local_pitch*(decoder->frame.height-1);
		if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
			local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
		else
			local_output_double = local_output;
		local_pitch = -local_pitch;
	}

	if(FLIPCOLORS(output_format) || output_format & 0x80000000)
	{
		decoder->cfhddata.InvertOffset = 1;
	}
	else
	{
		decoder->cfhddata.InvertOffset = 0;
	}
decoder->frame.format = output_format;
	//decoder->frame.colorspace = COLOR_SPACE_CG_601;

#if _THREADED
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							WorkerThreadProc,
							decoder);
		}
#endif
		// Run the vertical 3D pass only when some orientation or framing
		// correction is active (FrameOffsetX is deliberately excluded below).
		if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
			(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[1].FrameTilt ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset ||
			decoder->cfhddata.channel[2].FrameTilt)) ||
			((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			(decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR || // decoder->cfhddata.FrameOffsetX || ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
		{
			//int x;
			int xbytes, xstep;
			//uint8_t *base = local_output;
			int width, height, chunk_size;
			int fine_vertical = 0;

			width = decoder->frame.width;
			height = decoder->frame.height;

			// Bytes per row and horizontal step size for the vertical pass.
			switch(decoder->StereoBufferFormat)
			{
			case DECODED_FORMAT_RGB32:
				xbytes = width*4; xstep = 16;
				break;
			case DECODED_FORMAT_RGB24:
				xbytes = width*3; xstep = 16;
				break;
			case DECODED_FORMAT_YUYV:
				xbytes = width*2; xstep = 16;
				break;
			case DECODED_FORMAT_W13A:
			case DECODED_FORMAT_RG64:
				xbytes = width*8; xstep = 32;
				break;
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_RG48:
				xbytes = width*6; xstep = 32;
				break;
			default:
				assert(0);
				break;
			}

			// Large chunks when no rotation/keystone is active; per-column
			// chunks (chunk_size = 1) otherwise.
			if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
				(decoder->cfhddata.channel[1].RotationOffset == 0.0 &&
				decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
				decoder->cfhddata.channel[2].RotationOffset == 0.0 &&
				decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
				decoder->cfhddata.FrameOffsetR == 0.0))
			{
				chunk_size = 8;
			}
			else
			{
				chunk_size = 1;

				// Strong rotation/keystone: step a single pixel at a time
				// for a finer vertical correction.
				if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
					fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
					fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
					(fabs(decoder->cfhddata.channel[2].RotationOffset) +
					fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
					fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
				{
					switch(decoder->StereoBufferFormat)
					{
					case DECODED_FORMAT_RGB32:
						xstep = 4;
						break;
					case DECODED_FORMAT_RGB24:
						xstep = 3;
						break;
					case DECODED_FORMAT_YUYV:
						xstep = 4;
						break;
					case DECODED_FORMAT_W13A:
					case DECODED_FORMAT_RG64:
						xstep = 8;
						break;
					case DECODED_FORMAT_WP13:
					case DECODED_FORMAT_RG48:
					default:
						xstep = 6;
						break;
					}
					fine_vertical = 1;
				}
			}

			// Interlaced full/half-horizontal YUV 4:2:2: run the vertical
			// pass separately on each field.
			if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
				(decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
				decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
				decoder->codec.progressive == false)
			{
				int interlaced_pitch = local_pitch * 2;
				uint8_t *field2_output = local_output + local_pitch;

				// Post a message to the mailbox (field 1)
				mailbox->local_output = local_output;
				mailbox->local_pitch = interlaced_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->info.height >>= 1;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->chunk_size = chunk_size;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				// Post a message to the mailbox (field 2)
				mailbox->local_output = field2_output;
				mailbox->local_pitch = interlaced_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->info.height >>= 1;
				mailbox->chunk_size = chunk_size;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
			else
			{
				//TODO Lens corect here.
				//call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
				// JOB_TYPE_HORIZONTAL_3D
				//before doing any offset and rotation corrections.
				if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
					width /= 2;

				// Post a message to the mailbox
				mailbox->local_output = local_output;
				mailbox->local_pitch = local_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->chunk_size = chunk_size;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}

		// Post a message to the mailbox: horizontal 3D pass into the
		// caller's output buffer.
		mailbox->output = output;
		mailbox->pitch = pitch;
		mailbox->local_output = local_output;
		mailbox->local_pitch = local_pitch;
		mailbox->channel_offset = channel_offset;
		memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
		mailbox->chunk_size = 16;
		mailbox->line_max = decoder->frame.height;
		if(decoder->channel_mix_half_res == 1)
			mailbox->line_max *= 2;
		workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
		decoder->doVerticalFilter = 0;
		mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		// The horizontal pass may request an extra sharpen/vertical filter.
		if(decoder->doVerticalFilter)
		{
			// Post a message to the mailbox
			mailbox->output = output;
			mailbox->pitch = pitch;
			mailbox->local_output = local_output_double;
			mailbox->local_pitch = local_pitch;
			mailbox->channel_offset = channel_offset;
			memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
			mailbox->chunk_size = 16;
			mailbox->line_max = decoder->frame.height;
			if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
				mailbox->line_max *= 2;
			if(decoder->channel_mix_half_res == 1)
				mailbox->line_max *= 2;
			workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else
	// Single-threaded fallback path.
	// NOTE(review): this branch appears bit-rotted -- it references
	// emptyFrameMask (commented out at the top of this function) and the
	// large condition below looks one closing paren short; verify it still
	// compiles before building without _THREADED.
	{
		int y,width, height;
		uint8_t scratch[4096*16];
		int scratchremain = 4096*16;
		int ymin = 0, ymax;

		width = decoder->frame.width;
		height = decoder->frame.height;
		ymax = height;

		if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
		{
			ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
			ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
		}

		if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
			(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset)) ||
			((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			(decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR ||
			decoder->cfhddata.FrameOffsetX ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0))
		{
			int x,xbytes, xstep;
			uint8_t *base = local_output;
			float voffsetstep;
			float voffset = decoder->cfhddata.channel[1].VerticalOffset;
			float roffset = decoder->cfhddata.channel[1].RotationOffset;
			float voffset1, voffset2;
			float voffsetstep1, voffsetstep2;
			int channel_flip = decoder->cfhddata.channel_flip;
			int aspectx,aspecty;
			float aspectfix;

			GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
			aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);

			if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
			{
				voffset = roffset = 0;
			}
			if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
			{
				channel_flip = 0;
			}
			if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
				voffset += decoder->cfhddata.FrameOffsetY;

			if(decoder->cfhddata.InvertOffset)
			{
				voffset = -voffset;
				roffset = -roffset;
			}

			switch(decoder->StereoBufferFormat)
			{
			case DECODED_FORMAT_RGB32:
				xbytes = width*4; xstep = 16;
				break;
			case DECODED_FORMAT_RGB24:
				xbytes = width*3; xstep = 16;
				break;
			case DECODED_FORMAT_YUYV:
				xbytes = width*2; xstep = 16;
				break;
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_RG48:
			default:
				xbytes = width*6; xstep = 32;
				break;
			}

			//DAN20100923 -- simplied
			//voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
			//voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
			voffset += roffset * aspectfix * 0.5;
			voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);

			// No rotation: one full-row step suffices.
			if(roffset == 0.0)
				xstep = xbytes;

			voffset1 = voffset2 = voffset;
			voffsetstep1 = voffsetstep2 = voffsetstep;

			// Mirror/negate the per-channel offsets for flipped channels.
			if(channel_flip & 0xf)
			{
				if(channel_flip & 2)
				{
					voffset1 = -voffset1;
					voffsetstep1 = -voffsetstep1;
				}
				if(channel_flip & 8)
				{
					voffset2 = -voffset2;
					voffsetstep2 = -voffsetstep2;
				}
				if(channel_flip & 1)
				{
					voffset1 += voffsetstep1*(xbytes/xstep);
					voffsetstep1 = -voffsetstep1;
				}
				if(channel_flip & 4)
				{
					voffset2 += voffsetstep2*(xbytes/xstep);
					voffsetstep2 = -voffsetstep2;
				}
			}

			// Apply the vertical shift column strip by column strip.
			for(x=0; x<xbytes; x+=xstep)
			{
				if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
				{
					RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
				}
				else
				{
					RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, voffset1);
				}
				if(decoder->channel_decodes == 2)
				{
					uint8_t *bptr = base + channel_offset;
					RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
				}
				base += xstep;
				voffset1 += voffsetstep1;
				voffset2 += voffsetstep2;
			}
		}

		if(decoder->channel_mix_half_res == 1)
			height *= 2;

		if(ymin)
		{
			memset(local_output, 0, abs(local_pitch)); // zero one line;
		}
		// Rows above/below the frame mask reuse the zeroed line (pitch 0).
		for(y=0; y<ymin; y++)
		{
			ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
		}
		for(; y<ymax; y++)
		{
			ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
		}
		for(; y<height; y++)
		{
			ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
		}
	}
#endif
}

// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch,
				  ColorParam *colorparams, CFHDDATA *cfhddata)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
	int channel_decodes = 1; // 3D Work
	int channel_offset = 0;
	int channel_mask = 0;
	int channel_current = 0;
	//int wavelet_index;
	bool result = true;
	uint8_t *local_output = output;
	uint8_t *local_buffer = NULL;
	int local_pitch = pitch;
	int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format; bool use_local_buffer = false; DECODER *local_decoder = decoder; //Frame_Region emptyFrameMask = {0}; Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER; int orig_width = decoder->frame.width; int orig_height = decoder->frame.height; decoder->local_output = local_output; // used for NV12 decodes. decoder->sample_uncompressed = 0; // set if a uncompressed sample is found. decoder->image_dev_only = 0; if(decoder->flags & (1<<3)) // This is an image development only decode. { decoder->sample_uncompressed = 1; decoder->image_dev_only = 1; decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444; decoder->codec.unique_framenumber = 0; //What should this be? decoder->frame.white_point = 16; // how to we pass this in? decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer; switch(output_format & 0x7fffffff) { case COLOR_FORMAT_RGB24: decoder->uncompressed_size = orig_width * orig_height * 3; break; case COLOR_FORMAT_RGB32: decoder->uncompressed_size = orig_width * orig_height * 4; break; case COLOR_FORMAT_RG48: case COLOR_FORMAT_WP13: decoder->uncompressed_size = orig_width * orig_height * 6; break; default: decoder->uncompressed_size = orig_width * orig_height * 6; assert(0); break; } } decoder->frame.alpha_Companded = 0; // reset this state. if(decoder->parallelDecoder) decoder->parallelDecoder->sample_uncompressed = 0; decoder->error = CODEC_ERROR_OKAY; input->error = BITSTREAM_ERROR_OKAY; // first time through encoded_format is not initized. 
if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed { SAMPLE_HEADER header; BITSTREAM input2; InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ); memset(&header, 0, sizeof(SAMPLE_HEADER)); header.find_lowpass_bands = 2; // help finding the uncompressed flag if(ParseSampleHeader(&input2, &header)) { decoder->codec.encoded_format = header.encoded_format; decoder->sample_uncompressed = header.hdr_uncompressed; if(decoder->parallelDecoder) decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed; } } if((uintptr_t)input->lpCurrentBuffer & 0x3) { if(decoder->aligned_sample_buffer == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16); #else decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16); #endif memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); decoder->aligned_sample_buffer_size = input->dwBlockLength; } else { if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size) { memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); } else { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; FreeAligned(decoder->allocator, decoder->aligned_sample_buffer); decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16); #else MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer); decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16); #endif memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); decoder->aligned_sample_buffer_size = input->dwBlockLength; } } input->lpCurrentBuffer = decoder->aligned_sample_buffer; input->lpCurrentWord = decoder->aligned_sample_buffer; } #if 0 // Test for missaligning the image data if(((int)input->lpCurrentBuffer&3) == 
0) { int i; uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer; int missaligned = 1; //2 or 3 for(i=input->dwBlockLength-1; i>=0; i--) ptr[i+missaligned] = ptr[missaligned]; input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned]; input->lpCurrentWord = (uint8_t *)&ptr[missaligned]; } #endif //HACK // Unfortunately I need color matrix data deep within the codec for RT playback. if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input { if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER) { //int size = cfhddata->size; size_t size = cfhddata->size; memset(&decoder->cfhddata, 0, sizeof(CFHDDATA)); if (size > sizeof(CFHDDATA)) { // Limit the size to the known structure size = sizeof(CFHDDATA); } memcpy(&decoder->cfhddata, cfhddata, size); } } else { unsigned short value; if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA)) { memset(&decoder->cfhddata, 0, sizeof(CFHDDATA)); decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER; decoder->cfhddata.size = sizeof(CFHDDATA); if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults { decoder->cfhddata.cfhd_subtype = 2; //RGB decoder->cfhddata.num_channels = 3; } else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value)) { if(value == COLOR_FORMAT_RG48) { decoder->cfhddata.cfhd_subtype = 2; //RGB decoder->cfhddata.num_channels = 3; } else if(value == COLOR_FORMAT_RG64) { decoder->cfhddata.cfhd_subtype = 3; //RGBA decoder->cfhddata.num_channels = 4; } else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END) { unsigned int format = BAYER_FORMAT_RED_GRN; decoder->cfhddata.cfhd_subtype = 1; //BAYER decoder->cfhddata.bayer_format = format; // default to Red-Grn decoder->cfhddata.version = CFHDDATA_VERSION; } } } } OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed); if(decoder->image_dev_only) // HACK we need to support 3D also. 
decoder->source_channels = 1; else decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0); if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override)) { //int channels = 0; int channel_blend_type = BLEND_NONE; int channel_swapped_flags = 0; if(decoder->cfhddata.MSCTV_Override) { channel_mask = decoder->cfhddata.MSCTV_Override&0xff; channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff); channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff); } else { channel_mask = decoder->cfhddata.MSChannel_type_value&0xff; channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff); channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff); } if(channel_mask != 3) { channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } //if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302 { if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only { if(decoder->cfhddata.FramingFlags & 2) // channel swap { SkipVideoChannel(decoder, input, 2); // 3D work } } else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only { if(decoder->cfhddata.FramingFlags & 2) // channel swap { SkipVideoChannel(decoder, input, 1); // 3D work } else { //assume second channel decode SkipVideoChannel(decoder, input, 2); // 3D work } channel_current = 1; channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel { channel_current = 1; channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } else if((channel_mask&3) == 3) // A+B 3d work { channel_decodes = 2; decoder->channel_mix_half_res = 0; if(channel_blend_type != BLEND_NONE) { if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { 
//if(decoder->frame.format == DECODED_FORMAT_W13A) // { // decoder->frame.format = internal_format = DECODED_FORMAT_W13A; // } //else //{ // decoder->frame.format = internal_format = DECODED_FORMAT_RG64; // } decoder->frame.format = internal_format = DECODED_FORMAT_RGB32; local_pitch = decoder->frame.width * 4; } else { decoder->frame.format = internal_format = DECODED_FORMAT_RGB24; local_pitch = decoder->frame.width * 3; //RGB24 } /* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && (output_format == DECODED_FORMAT_YUYV || output_format == DECODED_FORMAT_UYVY)) { if( channel_blend_type == BLEND_FREEVIEW || ((channel_blend_type == BLEND_STACKED_ANAMORPHIC || channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280)) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; decoder->frame.height /= 2; local_pitch = (decoder->frame.width) * 3; //RGB24 } } */ } /* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA { decoder->frame.format = internal_format = DECODED_FORMAT_YUYV; local_pitch = decoder->frame.width * 2; //YUY2 channel_offset = local_pitch * (decoder->frame.height); use_local_buffer = true; }*/ /* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; decoder->frame.height /= 2; local_pitch /= 2; } */ if( decoder->frame.resolution == DECODED_RESOLUTION_FULL && (channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW)) { if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER) { if(decoder->sample_uncompressed) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; 
decoder->frame.height /= 2; local_pitch /= 2; } else { if(decoder->preformatted_3D_type > BLEND_NONE) { // leave as is. } else if(FORMAT8BIT(output_format)) { decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL; decoder->frame.width /= 2; local_pitch /= 2; } } } else { if(FORMAT8BIT(output_format)) decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER; } //TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; } if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2 { channel_offset = local_pitch * (decoder->frame.height); } else if(channel_blend_type >= BLEND_ANAGLYPH_RC) { /* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph { //B&W using YUYV decoder->frame.format = internal_format = DECODED_FORMAT_YUYV; local_pitch = decoder->frame.width * 2; //YUY2 }*/ channel_offset = local_pitch * (decoder->frame.height); use_local_buffer = true; } else if(channel_blend_type == BLEND_NONE) // double high { channel_offset = pitch * decoder->frame.height; } else { channel_blend_type = BLEND_STACKED_ANAMORPHIC; channel_offset = pitch * (decoder->frame.height/2); } // fields, stacked, etc, only works on full or half res. if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED && decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail. 
{ channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } if (channel_blend_type != BLEND_NONE && (output_format == DECODED_FORMAT_BYR1 || output_format == DECODED_FORMAT_BYR2 || output_format == DECODED_FORMAT_BYR3 || output_format == DECODED_FORMAT_BYR4 )) { channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } } } decoder->channel_decodes = channel_decodes; decoder->channel_blend_type = channel_blend_type; decoder->channel_swapped_flags = channel_swapped_flags; } else { decoder->channel_decodes = channel_decodes = 1; decoder->channel_blend_type = BLEND_NONE; decoder->channel_swapped_flags = 0; } if(cfhddata) // So the P-frames can know the bayerformat { //int size = cfhddata->size; size_t size = cfhddata->size; if (size > sizeof(CFHDDATA)) { size = sizeof(CFHDDATA); } memcpy(cfhddata, &decoder->cfhddata, size); } { bool doOrientation = true; bool doFraming = true; bool doBurins = true; bool doImageflips = true; bool doGhostBust = false; bool doPrimaries = true; int process_path_flags = decoder->cfhddata.process_path_flags; int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask; if(decoder->basic_only) { doOrientation = false; doFraming = false; doBurins = false; doImageflips = false; doPrimaries = false; } else { if(decoder->cfhddata.process_path_flags_mask) { //DAN20101007 -- if(process_path_flags == 0) decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask; process_path_flags &= decoder->cfhddata.process_path_flags_mask; if(process_path_flags_mask & PROCESSING_ACTIVE2) { if(!(process_path_flags_mask & PROCESSING_ORIENTATION)) doOrientation = false; if(!(process_path_flags_mask & PROCESSING_FRAMING)) doFraming = false; if(!(process_path_flags_mask & PROCESSING_BURNINS)) doBurins = false; if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS)) doImageflips = false; } if(!(process_path_flags_mask & PROCESSING_COLORMATRIX)) doPrimaries = false; } 
if(process_path_flags & PROCESSING_ACTIVE2) { if(!(process_path_flags & PROCESSING_ORIENTATION)) doOrientation = false; if(!(process_path_flags & PROCESSING_FRAMING)) doFraming = false; if(!(process_path_flags & PROCESSING_BURNINS)) doBurins = false; if(!(process_path_flags & PROCESSING_IMAGEFLIPS)) doImageflips = false; if(!(process_path_flags & PROCESSING_COLORMATRIX)) doPrimaries = false; } } if(doOrientation) process_path_flags |= PROCESSING_ORIENTATION; if(doFraming) process_path_flags |= PROCESSING_FRAMING; if(doBurins) process_path_flags |= PROCESSING_BURNINS; if(doImageflips) process_path_flags |= PROCESSING_IMAGEFLIPS; if(doPrimaries) process_path_flags |= PROCESSING_COLORMATRIX; if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { doGhostBust = true; } } decoder->cfhddata.process_path_flags = process_path_flags; if((!decoder->basic_only && (doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL || decoder->cfhddata.channel[0].FloatingWindowMaskR || decoder->cfhddata.channel[0].FrameKeyStone || decoder->cfhddata.channel[0].FrameTilt || decoder->cfhddata.channel[0].HorizontalOffset || decoder->cfhddata.channel[0].VerticalOffset || decoder->cfhddata.channel[0].RotationOffset || decoder->cfhddata.channel[1].FloatingWindowMaskL || decoder->cfhddata.channel[1].FloatingWindowMaskR || decoder->cfhddata.channel[1].FrameKeyStone || decoder->cfhddata.channel[1].FrameTilt || decoder->cfhddata.channel[1].HorizontalOffset || decoder->cfhddata.channel[1].VerticalOffset || decoder->cfhddata.channel[1].RotationOffset || decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 || decoder->cfhddata.channel[2].FloatingWindowMaskL || decoder->cfhddata.channel[2].FloatingWindowMaskR || decoder->cfhddata.channel[2].FrameKeyStone || decoder->cfhddata.channel[2].FrameTilt || decoder->cfhddata.channel[2].HorizontalOffset || decoder->cfhddata.channel[2].VerticalOffset 
|| decoder->cfhddata.channel[2].RotationOffset || decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0))) || (doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 || decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 || decoder->cfhddata.channel[2].user_blur_sharpen != 0.0)) || (doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 || decoder->cfhddata.channel[1].user_vignette_start != 0.0 || decoder->cfhddata.channel[2].user_vignette_start != 0.0)) || (doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) || decoder->cfhddata.FrameOffsetX || decoder->cfhddata.FrameOffsetY || decoder->cfhddata.FrameOffsetR || decoder->cfhddata.FrameHScale != 1.0 || decoder->cfhddata.FrameHDynamic != 1.0 || decoder->cfhddata.channel[1].FrameZoom != 1.0 || decoder->cfhddata.channel[2].FrameZoom != 1.0)) || (doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2)) || (doImageflips && decoder->cfhddata.channel_flip) || (decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) || (decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) || (decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes. 
( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) || decoder->sample_uncompressed) || (decoder->cfhddata.doMesh) ) { if( output_format == DECODED_FORMAT_BYR1 || output_format == DECODED_FORMAT_BYR2 || output_format == DECODED_FORMAT_BYR3 || output_format == DECODED_FORMAT_BYR4 ) { // no manipulation should be applied } else { use_local_buffer = true; local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48 if(decoder->image_dev_only) { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_WP13; } else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_W13A; local_pitch = ((decoder->frame.width+7)/8)*8 * 8; } else { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_WP13; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { local_pitch *= 2; // need horizontal room to make 3D side by side frame } /* if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A) { // preserve HDR decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output if(output_format == DECODED_FORMAT_W13A) local_pitch = decoder->frame.width * 8; } else { if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { decoder->frame.format = internal_format = DECODED_FORMAT_RG64; local_pitch = decoder->frame.width * 8; } else { decoder->frame.format = internal_format = DECODED_FORMAT_RG48; } }*/ channel_offset = local_pitch * (decoder->frame.height); } } } if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0) { if(decoder->BYR4LinearRestore == NULL) { int j,val; int encode_curve_type = 
decoder->cfhddata.encode_curve >> 16; //int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE; float encode_curvebase; if(encode_curve_type) //1 or 2 { if(encode_curve_type & CURVE_TYPE_EXTENDED) encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases else encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); } else { encode_curve_type = CURVE_TYPE_LOG; encode_curvebase = 90.0; } #if _ALLOCATOR decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16); #else decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16); #endif for(j=0; j<16384; j++) //0 to 1 { switch(encode_curve_type & CURVE_TYPE_MASK) { case CURVE_TYPE_LOG: val = (int)(CURVE_LOG2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_GAMMA: val = (int)(CURVE_GAM2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_CINEON: val = (int)(CURVE_CINEON2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_CINE985: val = (int)(CURVE_CINE9852LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_PARA: val = (int)(CURVE_PARA2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f); break; case CURVE_TYPE_CSTYLE: val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f); break; case CURVE_TYPE_SLOG: val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f); break; case CURVE_TYPE_LOGC: val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f); break; case CURVE_TYPE_LINEAR: default: val = j; break; } if(val < 0) val = 0; if(val > 65535) val = 65535; decoder->BYR4LinearRestore[j] = val; } } } //DAN20120319 - removed /*if(decoder->channel_mix_half_res) //decoding half but scaling to 
double the output size { local_pitch *= 2; channel_offset = local_pitch * (decoder->frame.height*2); }*/ if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats { int stereoframesize = channel_offset * channel_decodes/*stacked frames*/; if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE) stereoframesize = channel_offset; if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE) stereoframesize = channel_offset * 2; if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE) stereoframesize = channel_offset * 2; if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize) { #if _ALLOCATOR if(decoder->StereoBuffer) { FreeAligned(decoder->allocator, decoder->StereoBuffer); decoder->StereoBuffer = NULL; } decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet. #else if(decoder->StereoBuffer) { MEMORY_ALIGNED_FREE(decoder->StereoBuffer); decoder->StereoBuffer = NULL; } decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet. #endif assert(decoder->StereoBuffer != NULL); if (! (decoder->StereoBuffer != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->StereoBufferSize = stereoframesize; } decoder->StereoBufferFormat = internal_format; local_buffer = (uint8_t *)decoder->StereoBuffer; local_output = local_buffer; } DecodeEntropyInit(decoder); //swapped -- Maybe useful for double height decodes. /* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED) { local_output += channel_offset; channel_offset = -channel_offset; }*/ decoder->use_local_buffer = use_local_buffer ? 
1 : 0; if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1) { int encoded_width = decoder->frame.width; int encoded_height = decoder->frame.height; if (decoder->frame.resolution == DECODED_RESOLUTION_HALF) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 2; encoded_height *= 2; } else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 4; encoded_height *= 4; } else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 2; } else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL) { // Compute the encoded dimensions from the frame dimensions encoded_height *= 2; } #if _ALLOCATOR decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER)); if(decoder->parallelDecoder) { memset(decoder->parallelDecoder, 0, sizeof(DECODER)); DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL); } #else decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER)); if(decoder->parallelDecoder) { memset(decoder->parallelDecoder, 0, sizeof(DECODER)); decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl; DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL); } #endif } // Using the parallel decoder? 
if (decoder->parallelDecoder) { // Initialize the parallel decoder with parameters from the regular decoder memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA)); memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16); DecodeEntropyInit(decoder->parallelDecoder); DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize); decoder->parallelDecoder->channel_decodes = decoder->channel_decodes; decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type; decoder->parallelDecoder->flags = decoder->flags; decoder->parallelDecoder->frame = decoder->frame; decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0; decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format; if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0) { CreateLock(&decoder->parallelDecoder->decoder_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool, 1, // ParallelThreadProc, decoder->parallelDecoder); } } if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count) { // Second stream as a thread. BITSTREAM second_input = *input; if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap { BITSTREAM leftEye_input = *input; SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work *input = leftEye_input; SkipVideoChannel(decoder, &second_input, 1); // 3D work } else SkipVideoChannel(decoder, &second_input, 2); // 3D work decoder->channel_current = 0; decoder->parallelDecoder->channel_current = 1; // Instead of reading the metadata databases again, use the ones in the main decoder OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed); // DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.) 
// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed); //OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed); // Hack, this gets lost decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position; #if (_THREADED && _GRAPHICS) if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { if(decoder->cfhddata.BurninFlags & 3) // overlays / tools { DrawStartThreaded(decoder); } } #endif // Post a message to the mailbox decoder->parallelDecoder->decoder_thread.input = &second_input; if(use_local_buffer == false && (decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24)) { decoder->parallelDecoder->decoder_thread.output = local_output; local_output += channel_offset; } else { decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset; } decoder->parallelDecoder->decoder_thread.pitch = local_pitch; decoder->parallelDecoder->decoder_thread.colorparams = colorparams; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1); // Start the transform worker threads ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START); // do the first channel { TAGVALUE segment; int sample_type; #if _THREADED decoder->entropy_worker_new.next_queue_num = 0; decoder->entropy_worker_new.threads_used = 0; #endif // Get the type of sample segment = GetTagValue(input); assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { decoder->error = CODEC_ERROR_BITSTREAM; STOP(tk_decompress); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame) result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_FRAME: // Decode the second or later 
frame in a group result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_SEQUENCE_HEADER: // The video sequence header is ignored result = true; break; default: // Need to fill the output frame //error = CODEC_ERROR_SAMPLE_TYPE; result = false; } } // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool); } else { while(channel_decodes > 0) { TAGVALUE segment; int sample_type; local_decoder->channel_current = channel_current++; //OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed); #if (_THREADED && _GRAPHICS) if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { if(decoder->cfhddata.BurninFlags & 3) //overlays / tools { DrawStartThreaded(decoder); } } #endif #if _THREADED local_decoder->entropy_worker_new.next_queue_num = 0; local_decoder->entropy_worker_new.threads_used = 0; #endif if(decoder->image_dev_only) { result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams); } else { // Get the type of sample segment = GetTagValue(input); assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { local_decoder->error = CODEC_ERROR_BITSTREAM; STOP(tk_decompress); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame) result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame result = 
DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_SEQUENCE_HEADER: // The video sequence header is ignored result = true; break; default: // Need to fill the output frame //error = CODEC_ERROR_SAMPLE_TYPE; result = false; } } if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset)) { channel_decodes = 0; } else { channel_decodes--; local_output += channel_offset; if(decoder->parallelDecoder) { local_decoder = decoder->parallelDecoder; } } } } if(use_local_buffer && output) { decoder->use_local_buffer = 0; #if WARPSTUFF WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat); MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat); #endif ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset)); } else { #if WARPSTUFF WarpFrame(decoder, output, pitch, output_format); MaskFrame(decoder, output, pitch, output_format); #endif } if(decoder->channel_mix_half_res) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; decoder->frame.width *= 2; decoder->frame.height *= 2; decoder->channel_mix_half_res = 0; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; decoder->frame.width *= 2; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; } #if _GRAPHICS if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { PaintFrame(decoder, output, pitch, output_format); } #endif STOP(tk_decompress); // Return indication of whether decoding succeeded or failed return result; } // Decode a sample that encoded a group of frames (return the first frame) bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams) { CODEC_ERROR error 
= CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int32_t frame_size = decoder->frame.height * pitch; int resolution = decoder->frame.resolution; bool result = true; static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0}; static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3}; int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]); #if (0 && DEBUG) // Force quarter resolution decoding for debug that feature resolution = DECODED_RESOLUTION_QUARTER; #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoding sample group\n"); } #endif START(tk_decoding); // Initialize the codec state InitCodecState(&decoder->codec); // Allocate the transform data structure for the group of frames AllocDecoderGroup(decoder); // Initialize the tables for decoding the wavelet transforms InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands); // Clear the flags in the wavelet transforms ClearTransformFlags(decoder); // Process the tag value pairs until an encoded subband is found for (;;) { TAGVALUE segment; // Read the next tag value pair from the bitstream //segment = GetTagValue(input); segment = GetSegment(input); assert(input->error == BITSTREAM_ERROR_OKAY); if (input->error != BITSTREAM_ERROR_OKAY) { decoder->error = CODEC_ERROR_BITSTREAM; result = false; break; } // Update the codec state with the information in the tag value pair { TAGWORD tag = segment.tuple.tag; TAGWORD value = segment.tuple.value; // Use the tag value pair to update the codec state error = UpdateCodecState(decoder, input, codec, tag, value); assert(error == CODEC_ERROR_OKAY); if (error != CODEC_ERROR_OKAY) { decoder->error = error; result = false; break; //NOTE: Consider moving the error code into the codec state } } // Check whether the group has been decoded if (codec->sample_done) break; // Skip the rest of the 
current channel? if (CanSkipChannel(decoder, resolution)) { if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; codec->num_channels = 3; goto decoding_complete; } else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } else { // Compute the bitstream position after the current channel int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Get the temporal wavelet int temporal_index = 2; TRANSFORM *transform = decoder->transform[channel]; IMAGE *wavelet = transform->wavelet[temporal_index]; #if (0 && DEBUG) if (IsBandValid(wavelet, HIGHPASS_BAND)) { int static count = 0; if (count < 20) { char label[_MAX_PATH]; sprintf(label, "Temporal-decode-%d-", count); DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL); } count++; } #endif #if _THREADED_DECODER // Ready to invert this wavelet to get the lowpass band in the lower wavelet? //if (DecodedBandsValid(wavelet, temporal_index)) if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)) #else // Have all bands in the temporal wavelet been decoded? 
//if (wavelet && BANDS_ALL_VALID(wavelet)) if (AllBandsValid(wavelet)) #endif { //PIXEL *buffer = (PIXEL *)decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n"); } #endif #if _THREADED_DECODER // Add the temporal inverse transform to the processing queue if(decoder->entropy_worker_new.pool.thread_count) { ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index, precision, &decoder->scratch, 1); QueueThreadedTransform(decoder, channel, temporal_index); } else #endif { // Reconstruct the lowpass bands in the first level wavelets //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size); ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index, precision, &decoder->scratch, 0 ); } // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; // Note that the subband flags are also reset when the channel header is decoded } // Was the wavelet created? else if (wavelet == NULL) { // The temporal wavelet is not created during quarter resolution decoding // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } //TODO: Improve quarter resolution decoding so that the wavelet is created? 
} } } decoding_complete: STOP(tk_decoding); #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int channel; for (channel = 0; channel < codec->num_channels; channel++) { TRANSFORM *transform = decoder->transform[channel]; IMAGE *wavelet = transform->wavelet[2]; uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND]; int height = wavelet->height; int pitch = wavelet->pitch; int size = height * pitch; int band; for (band = 0; band < wavelet->num_bands; band++) { sprintf(label, "Temporal channel: %d, band: %d", channel, band); DumpBandStatistics(label, wavelet, band, logfile); #if 0 sprintf(label, "Temporal-channel%d-band%d-", channel, band); DumpBandPGM(label, wavelet, band, NULL); #endif } assert(size > 0); ZeroMemory(data, size); } } #endif if (result) { // Two frames have been decoded decoder->gop_length = 2; decoder->frame_count += 2; #if (1 && DEBUG) if (logfile) { fprintf(logfile, "DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n", decoder, decoder->gop_length); } #endif // Return the first frame in the group if (!decoder->no_output) { #if 0 // Decoding to quarter frame resolution at full frame rate? 
// NOTE(review): this span begins inside DecodeSampleGroup, within a
// "#if 0" block (opened on the previous chunk line) that disabled
// quarter-resolution reconstruction at full frame rate.
if (resolution == DECODED_RESOLUTION_QUARTER)
{
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	char *buffer = decoder->buffer;
	size_t buffer_size = decoder->buffer_size;
	uint8_t *frame1 = output;
	uint8_t *frame2 = decoder->output2;
	assert(frame2 != NULL);

	// Reconstruct two frames at quarter resolution
	ReconstructQuarterFrame(decoder, num_channels, frame1, frame2,
							pitch, info, buffer, buffer_size);
}
else
#endif
	// Finish computing the output frame
	ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
}

if (decoder->error != CODEC_ERROR_OKAY) {
	result = false;
}

#if TIMING
// Increment the count of bytes that have been decoded
decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}

if (!result)
{
	// Check that the frame can be cleared
	assert(frame_size > 0);
	if (frame_size > 0)
	{
		// Zero the frame
		memset(output, 0, frame_size);
	}
}

return result;
}

// Decode a sample that represents the second frame in a group
//
// Parses tag/value pairs from the bitstream up to the frame index tag,
// then reconstructs the second frame of the current GOP into the output
// buffer (falling back to the first frame when the GOP holds only one
// frame).  On any bitstream or codec-state error the output frame is
// zeroed and false is returned.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch,
					   ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	bool result = true;

	START(tk_decoding);

	// Decode the tag value pairs in the frame sample
	for (;;)
	{
		TAGWORD tag;
		TAGWORD value;

		// Read the next tag value pair from the bitstream
		//TAGVALUE segment = GetTagValue(input);
		TAGVALUE segment = GetSegment(input);
		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		// Update the codec state with the information in the tag value pair
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Use the tag value pair to update the codec state
		error = UpdateCodecState(decoder, input, codec, tag, value);
		assert(error == CODEC_ERROR_OKAY);
		if (error != CODEC_ERROR_OKAY) {
			decoder->error = error;
			result = false;
			break;
		}

		// End of the frame header?
		if (tag == CODEC_TAG_FRAME_INDEX) break;
	}

	STOP(tk_decoding);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
				decoder, decoder->gop_length);
	}
#endif

	if (result)
	{
		// Return the second frame in the group
		// assert(decoder->gop_length >= 2);
		if (decoder->gop_length >= 2)
		{
			int frame_index = 1;

			// Display the second frame in the group
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}
		else if (decoder->gop_length > 0)
		{
			int frame_index = 0;

			// Display the first frame in the group
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Frame type that is not handled

		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0)
		{
			// Zero the frame
			memset(output, 0, frame_size);
		}
	}

	return result;
}

// Decode a sample that encodes an intra frame
//
// NOTE(review): subband_band_index has 16 entries while
// subband_wavelet_index has only 10; num_subbands is derived from the
// wavelet index table, so the trailing band entries are unused --
// confirm against the encoder's intra-frame subband layout.
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch,
							ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	int resolution = decoder->frame.resolution;
	bool result = true;

	// Map each encoded subband to its wavelet and band (spatial transform)
	static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
	static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
	int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

	START(tk_decoding);

	if(decoder->image_dev_only)
		goto decoding_completeI;

	// Initialize the codec state
	InitCodecState(&decoder->codec);

	// Allocate the transform data structure for the
group of frames AllocDecoderGroup(decoder); // Initialize the tables for decoding the wavelet transforms InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands); // Clear the flags in the wavelet transforms ClearTransformFlags(decoder); //Force V210 output for debugging ***DEBUG*** //decoder->frame.format = DECODED_FORMAT_V210; // Process the tag value pairs until an encoded subband is found for (;;) { TAGVALUE segment; // Read the next tag value pair from the bitstream segment = GetSegment(input); assert(input->error == BITSTREAM_ERROR_OKAY); if (input->error != BITSTREAM_ERROR_OKAY) { decoder->error = CODEC_ERROR_BITSTREAM; result = false; break; } { TAGWORD tag = segment.tuple.tag; TAGWORD value = segment.tuple.value; // Use the tag value pair to update the codec state error = UpdateCodecState(decoder, input, codec, tag, value); assert(error == CODEC_ERROR_OKAY); if (error != CODEC_ERROR_OKAY) { decoder->error = error; result = false; break; //NOTE: Consider moving the error code into the codec state } } // Check whether the group has been decoded if (codec->sample_done) { break; } // Skip the rest of the current channel? 
if (CanSkipChannel(decoder, resolution)) { if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; codec->num_channels = 3; goto decoding_completeI; } else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } else { // Compute the bitstream position after the current channel int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Get the highest wavelet in the pyramid int wavelet_index = 2; TRANSFORM *transform = decoder->transform[channel]; IMAGE *wavelet = transform->wavelet[wavelet_index]; #if _THREADED_DECODER // Ready to invert this wavelet to get the lowpass band in the lower wavelet? //if (DecodedBandsValid(wavelet, temporal_index)) if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)) #else // Have all bands in the wavelet been decoded? 
if (AllBandsValid(wavelet)) #endif { //PIXEL *buffer = (PIXEL *)decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int band; sprintf(label, "Channel: %d, index: %d", channel, wavelet_index); DumpImageStatistics(label, wavelet, logfile); #if 1 for (band = 1; band < wavelet->num_bands; band++) { sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band); DumpBandStatistics(label, wavelet, band, logfile); } #endif } #endif #if (0 & DEBUG) if (logfile) { fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n"); } #endif #if _THREADED_DECODER // Add the inverse spatial transform to the processing queue if(decoder->entropy_worker_new.pool.thread_count) { ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index, precision, &decoder->scratch, 1); QueueThreadedTransform(decoder, channel, wavelet_index); } else #endif { // Reconstruct the lowpass bands in the first level wavelets //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size); ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index, precision, &decoder->scratch, 0); } // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; // Note that the subband flags are also reset when the channel header is decoded } // Was the wavelet created? 
//else if (wavelet == NULL) else { // The wavelet may not have been created during quarter resolution decoding // The wavelet should have been created if all bands are valid assert(wavelet != NULL); // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } //TODO: Improve quarter resolution decoding so that the wavelet is created? } } } decoding_completeI: STOP(tk_decoding); if (result) { // One frame has been decoded decoder->gop_length = 1; decoder->frame_count += 1; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n", decoder, decoder->gop_length); } #endif // Return the first frame (the only frame that was decoded) if (!decoder->no_output) { int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)) { //CODEC_STATE *codec = &decoder->codec; TRANSFORM **transform_array = decoder->transform; int num_channels = codec->num_channels; //int progressive = codec->progressive; FRAME_INFO *info = &decoder->frame; int precision = codec->precision; #if _THREADED_DECODER // Wait until the transform thread has finished all pending transforms WaitForTransformThread(decoder); #endif ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision); } else { // Finish computing the output frame ReconstructSampleFrameToBuffer(decoder, 0, output, pitch); } } if (decoder->error != CODEC_ERROR_OKAY) { result = false; } #if TIMING // Increment the count of bytes that have been decoded decode_byte_count += (COUNTER)BitstreamByteCount(input); #endif } if (!result) { // Check that the frame can be cleared assert(frame_size > 0); if (frame_size > 0) { // Zero the frame memset(output, 0, 
// Tail of DecodeSampleIntraFrame: zero the frame on failure and return
frame_size); } } return result; }

// Decode a sample channel header
//
// Reads the channel header that separates channels within a sample,
// advances the codec state to the next channel, and initializes the
// next channel's transform from the previous one.  Returns false if
// the header could not be decoded.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	CHANNEL_HEADER header;
	TRANSFORM *transform = decoder->transform[channel];
	TRANSFORM *next_transform;

	// Advance to the next channel
	channel++;

	// Get the next transform for decoded information
	//TRANSFORM *next_transform = AllocGroupTransform(group, channel);

	// Decode the rest of the channel header
	error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
	assert(error == CODEC_ERROR_OKAY);
	decoder->error = error;
	if (error != CODEC_ERROR_OKAY) return false;

	// The decoder is not able to skip channels
	assert(header.channel == channel);

	// Initialize the next transform using the previous one
	next_transform = decoder->transform[channel];
	InitChannelTransform(next_transform, transform);

	// Update the channel
	codec->channel = channel;

	// Reset the subband counter
	codec->band.subband = 0;

	// Reset the decoded subband flags
	codec->decoded_subband_flags = 0;

	// Loop back to decode the next channel
	//transform = next_transform;

	return true;
}

// Decode the coefficients in a subband
//
// Dispatches on the subband number: 255 is an empty (temporal highpass)
// band, positive numbers are highpass bands, zero is the lowpass band.
// After a successful decode it may queue or run the inverse wavelet
// transform for the completed wavelet.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int *subband_wavelet_index = decoder->subband_wavelet_index;

	// Used for quarter resolution and threaded decoding
	int transform_type = transform->type;

	// Wavelet parameters
	int width;
	int height;
	int level;
	int type;
	int band;
	int threading = 1;

	// Wavelet containing the band to decode
	int index;
	IMAGE *wavelet = NULL;
	bool result;

	// NOTE(review): subbands 7-10 of a fieldplus transform are decoded
	// without threading -- presumably an ordering dependency with the
	// temporal wavelet; confirm against the threaded transform queue.
	if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
		threading = 0;

	// Update the
// Update the transform data structure from the codec state
// (comment text continued from the previous line of this chunk)
UpdateCodecTransform(transform, codec);

// Is this an empty band?
if (subband == 255)
{
	// Decode an empty band

	// This wavelet is the temporal wavelet
	index = 2;
	wavelet = transform->wavelet[index];

	// Get the wavelet parameters decoded from the bitstream
	width = codec->band.width;
	height = codec->band.height;
	level = codec->highpass.wavelet_level;
	type = codec->highpass.wavelet_type;
	band = codec->band.number;

	// The empty band should be the highpass band in a temporal wavelet
	assert(type == WAVELET_TYPE_TEMPORAL && band == 1);

#if _THREADED_DECODER
	// Allocate (or reallocate) the wavelet with thread safety
	wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
	// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
	wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
	wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
	// Save this wavelet in the transform data structure
	transform->wavelet[index] = wavelet;
#endif

	// Set the wavelet parameters
	wavelet->pixel_type[band] = PIXEL_TYPE_16S;
	wavelet->num_bands = 2;

	result = DecodeSampleEmptyBand(decoder, input, wavelet, band);

	// Set the subband number for the next band expected in the bitstream
	codec->band.subband = 11;
}
// Is this a highpass band?
else if (subband > 0) { // Decode a highpass band // Get the wavelet that contains this subband index = subband_wavelet_index[subband]; wavelet = transform->wavelet[index]; // Get the wavelet parameters decoded from the bitstream width = codec->band.width; height = codec->band.height; level = codec->highpass.wavelet_level; type = codec->highpass.wavelet_type; band = codec->band.number; #if _THREADED_DECODER // Allocate (or reallocate) the wavelet with thread safety wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type); #else // Allocate (or reallocate) the wavelet #if _ALLOCATOR wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type); #else wavelet = ReallocWaveletEx(wavelet, width, height, level, type); #endif // Save this wavelet in the transform data structure transform->wavelet[index] = wavelet; #endif result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading); if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandStartedFlags(decoder, wavelet, band); } // Reset the default encoding method codec->band.encoding = BAND_ENCODING_RUNLENGTHS; // Set the subband number for the next band expected in the bitstream codec->band.subband = subband + 1; } else { // Decode a lowpass band // Get the wavelet that contains this subband index = subband_wavelet_index[0]; wavelet = transform->wavelet[index]; // Get the wavelet parameters decoded from the bitstream width = codec->lowpass.width; height = codec->lowpass.height; level = codec->lowpass.level; type = codec->first_wavelet; //band = codec->band.number; band = 0; #if _THREADED_DECODER // Allocate (or reallocate) the wavelet with thread safety wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type); #else // Allocate (or reallocate) the wavelet #if _ALLOCATOR wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type); #else wavelet = ReallocWaveletEx(wavelet, 
width, height, level, type); #endif // Save this wavelet in the transform data structure transform->wavelet[index] = wavelet; #endif // The lowpass data is always stored in wavelet band zero assert(band == 0); // The lowpass band must be subband zero assert(subband == 0); result = DecodeSampleLowPassBand(decoder, input, wavelet); if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandValidFlags(decoder, wavelet, band); } // Set the subband number for the next band expected in the bitstream codec->band.subband = subband + 1; } // Was the subband successfully decoded? if (result) { // The transform will set the band valid flag if this is the temporal wavelet //if (index != 2) // Record that this subband has been decoded successfully if (0 <= subband && subband <= CODEC_MAX_SUBBAND) codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband); #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n", subband, index, channel); } #endif } #if _THREADED_DECODER // Ready to queue a threaded transform to invert this wavelet? if (BANDS_ALL_STARTED(wavelet)) { // Are frames being decoded to quarter resolution? if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)) { // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform) int highest_index = 5; if (transform_type == TRANSFORM_TYPE_SPATIAL) { // Smallest wavelet in the spatial transform highest_index = 2; } // Only the smallest spatial wavelet must be reconstructed if (index != highest_index) { return result; } //TODO: Can we improve on the current scheme for quarter resolution decoding? 
} if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2) { if(decoder->entropy_worker_new.pool.thread_count && threading) { ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index, codec->precision, &decoder->scratch, 1); // Add the inverse wavelet transform to the processing queue QueueThreadedTransform(decoder, codec->channel, index); } else { // Apply the inverse wavelet transform to reconstruct the lower level wavelet ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index, codec->precision, &decoder->scratch, 0); } } } #else // Ready to invert this wavelet to get the lowpass band in the lower wavelet? if (BANDS_ALL_VALID(wavelet)) { int channel = codec->channel; //PIXEL *buffer = (PIXEL *)decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int band; sprintf(label, "Channel: %d, index: %d", channel, index); DumpImageStatistics(label, wavelet, logfile); #if 1 for (band = 1; band < wavelet->num_bands; band++) { sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band); DumpBandStatistics(label, wavelet, band, logfile); } #endif } #endif // Are frames being decoded to quarter resolution? if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)) { // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform) int highest_index = 5; if (transform_type == TRANSFORM_TYPE_SPATIAL) { // Smallest wavelet in the spatial transform highest_index = 2; } // Only the smallest spatial wavelet must be reconstructed if (index != highest_index) { return result; } //TODO: Can we improve on the current scheme for quarter resolution decoding? 
} // Apply the inverse wavelet transform to reconstruct the lower level wavelet ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0); } #endif return result; } // Decode the coefficients in a lowpass band bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int channel = codec->channel; bool result = true; int lowpass_width; // Lowpass band dimensions int lowpass_height; int lowpass_pitch; PIXEL *pLowPassRow; // Pointer into the lowpass band //int wavelet_width; // Dimensions of the wavelet image //int wavelet_height; int bits_per_pixel; int quantization; int offset; //int pixel_divisor = (1 << (2 * codec->lowpass.level)); int row, column; int32_t solid_color = -1; const int gain = 128; const int colorshift = 0; // int channelgain[4]; //int waterrow=19, watercol=214; //int cspace = decoder->frame.colorspace; // Lowpass image dimensions may be smaller than the wavelet dimensions // because the encoder may have transmitted an image without the border lowpass_width = codec->lowpass.width; lowpass_height = codec->lowpass.height; lowpass_pitch = wavelet->pitch/sizeof(PIXEL); pLowPassRow = wavelet->band[0]; // Get the parameters for quantization performed by the encoder quantization = codec->lowpass.quantization; offset = codec->lowpass.pixel_offset; bits_per_pixel = codec->lowpass.bits_per_pixel; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decode lowpass subband\n"); } #endif if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1)) { int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord; //int signval = 0; //int channel3stats = 0; int channeloffset = 0; if(decoder->codec.precision == 8) { channeloffset = (codec->num_frames==2 ? 
64 : 32); } else if(decoder->codec.precision == 10) { switch(decoder->frame.format) { case DECODED_FORMAT_YU64: case DECODED_FORMAT_YR16: case DECODED_FORMAT_V210: channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301 break; default: channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601 } if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?) channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames } else if(decoder->codec.precision == 12) { switch(decoder->frame.format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB24_INVERTED: case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RGB32_INVERTED: channeloffset = 8; //DAN200906010 break; // 16-bit precision: case DECODED_FORMAT_RG48: case DECODED_FORMAT_RG64: case DECODED_FORMAT_B64A: case DECODED_FORMAT_WP13: case DECODED_FORMAT_W13A: channeloffset = 0; break; case DECODED_FORMAT_RG30: case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed. 
break; default: channeloffset = 0; break; } } if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames channeloffset = 0; #define DUMPLL 0 #if (_DEBUG && DUMPLL) FILE *fp; if(channel == 0) { static int inc = 1; char name[256]; sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++); fp = fopen(name,"w"); fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height); } #endif #if LOSSLESS channeloffset = 0; //LOSSLESS #endif //if(lpCurrentLong[0] == 0xffffffff) if(lpCurrentLong[0] == (int32_t)UINT32_MAX) { if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width) { if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height) { solid_color = SwapInt32BtoN(lpCurrentLong[1]); solid_color |= (solid_color<<16); lpCurrentLong += 4; } } } // Decode each row in the lowpass image for (row = 0; row < lowpass_height; row++) { int pixels; // Start at the first column column = 0; // Process the rest of the row { for (; column < lowpass_width; column++) { int pixel_value; //int i; // Perform inverse quantization if(column & 1) { pixel_value = pixels; } else { //pixels = _bswap(*(lpCurrentLong++)); if(solid_color == -1) pixels = SwapInt32BtoN(*(lpCurrentLong++)); else pixels = solid_color; pixel_value = (pixels>>16); pixels <<= 16; pixels >>= 16; } // Store the pixel in the lowpass band of the wavelet pixel_value += channeloffset; // pixel_value -= 64; // pixel_value += ((rand() & 0x7fff) - 0x4000); // if(pixel_value < 0) pixel_value = 0; if(pixel_value > 0x7fff) pixel_value = 0x7fff; pLowPassRow[column] = pixel_value; #if (_DEBUG && DUMPLL) if(channel==0 && fp) fprintf(fp, "%d\n", pixel_value>>7); #endif } } // Advance to the next row in the lowpass image pLowPassRow += lowpass_pitch; } #if (_DEBUG && DUMPLL) if(channel == 0 && fp) fclose(fp); #endif #if ERROR_TOLERANT // Update the count of bytes used stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - 
(intptr_t)stream->lpCurrentWord)); #endif // Update the bitstream stream->lpCurrentWord = (uint8_t *)lpCurrentLong; } else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE) { uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord; //int signval = 0; // Decode each row in the lowpass image for (row = 0; row < lowpass_height; row++) { // Start at the first column column = 0; // Process the rest of the row for (; column < lowpass_width; column++) { int pixel_value = *(lpCurrentByte++); // Perform inverse quantization #if _ENCODE_CHROMA_ZERO if (channel == 0) pixel_value = (quantization * pixel_value) + offset; else pixel_value = (pixel_value - offset) * quantization; #else pixel_value = (quantization * pixel_value) + offset;// + colorshift; #endif pixel_value -= 128 * quantization; pixel_value *= gain; pixel_value >>= 7; pixel_value += 128 * quantization; pixel_value += colorshift; // Store the pixel in the lowpass band of the wavelet // Multiply by 16 to turn 8-bit into the new 16-bit format pLowPassRow[column] = pixel_value * 16; } // Advance to the next row in the lowpass image pLowPassRow += lowpass_pitch; } #if ERROR_TOLERANT // Update the count of bytes used stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord)); #endif // Update the bitstream stream->lpCurrentWord = (uint8_t *)lpCurrentByte; } else { int channeloffset = 0; if(decoder->codec.precision == 8) { channeloffset = (codec->num_frames==2 ? 64 : 32); } else if(decoder->codec.precision == 10) { channeloffset = (codec->num_frames==2 ? 10 : 5); } else if(decoder->codec.precision == 12) { // channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images } //DAN20050923 no longer trying to compensate for YUV to RGB issues. 
if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32) { if(decoder->codec.precision == 8) { switch(channel) { case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += 16; break; case 2: channeloffset += 10; break; } } else if(decoder->codec.precision == 10) { switch(channel) { case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += -4; break; case 2: channeloffset += -4; break; } } else if(decoder->codec.precision == 12) { switch(channel) { case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += 0; break; case 2: channeloffset += 0; break; } } } if(bits_per_pixel != 16) channeloffset = 0; for (row = 0; row < lowpass_height; row++) { for (column = 0; column < lowpass_width; column++) { int pixel_value = GetBits(stream, bits_per_pixel); // Perform inverse quantization #if _ENCODE_CHROMA_ZERO if (channel == 0) pixel_value = (quantization * pixel_value) + offset; else pixel_value = (pixel_value - offset) * quantization; #else pixel_value = (quantization * pixel_value) + offset;// + colorshift; #endif // Store the pixel in the lowpass band of the wavelet pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes } stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3); // Advance to the next row in the lowpass image pLowPassRow += lowpass_pitch; } } // Set the wavelet scale factor wavelet->scale[0] = quantization; // Align the bitstream to the next tag value pair AlignBitsTag(stream); // Return indication of lowpass decoding success return result; } // Decode the coefficients in a highpass band bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile 
// NOTE(review): this span opens mid-statement; the fragment below
// completes "FILE *logfile" declared inside "#if (1 && DEBUG)" on the
// previous line of this chunk.
= decoder->logfile;
#endif

CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int index = codec->highpass.wavelet_number;

int width;
int height;
int quantization;

// The encoder may not have used variable-length coding
int method = codec->band.encoding;

bool result = true;

// Check that the band index is in range
assert(0 <= band && band <= codec->max_subband);

// Encoded coefficients start on a tag boundary
AlignBitsTag(stream);

#if (0 && DEBUG)
// Dump the band header to the logfile
if (logfile) {
	fprintf(logfile,
			"Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n",
			header->marker, header->subband, header->width, header->height, header->encoding);
}
#endif

// Copy the scale factors used by the encoder into the wavelet band
// (Zero means that the encoder did not supply this parameter)
if (codec->band.scale > 0) {
	wavelet->scale[band] = codec->band.scale;
}

// Get the quantization factor that was used to encode the band coefficients
quantization = codec->band.quantization;

// Copy the quantization into the wavelet
wavelet->quantization[band] = quantization;

#if (0 && DEBUG)
if (logfile) {
	fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization);
}
#endif

// Get the highpass band dimensions
width = codec->band.width;
height = codec->band.height;

// Is this a special band for the temporal high pass thumbnail?
if (method == BAND_ENCODING_LOSSLESS) { //lossless temporal subband //DAN20060701 result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height); assert(result); if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandValidFlags(decoder, wavelet, band); } } else if (method == BAND_ENCODING_16BIT) { //lossless temporal subband //DAN20060701 result = DecodeBand16s(decoder, stream, wavelet, band, width, height); assert(result); if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandValidFlags(decoder, wavelet, band); } } else { // Must use the runlength encoding method assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS); #if 0 // This code attempts to not decode various subbands for 1/4 res decodes. // Unforuntately playback would stop after 5 seonds with this code (but not in debug mode.) if (subband >= 4 && subband <= 6) { TAGVALUE segment; AlignBitsTag(stream); do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } else #elif 0 // Is this subband required for decoding the frame? 
if (CanSkipSubband(decoder, subband)) { // Skip past the end of this subband SkipSubband(stream); } #endif // Decode this subband result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading); } // Return failure if a problem was encountered while reading the band coefficients if (!result) return result; // The encoded band coefficients end on a bitstream word boundary // to avoid interference with the marker for the coefficient band trailer AlignBits(stream); // Decode the band trailer error = DecodeBandTrailer(stream, NULL); decoder->error = error; assert(error == CODEC_ERROR_OKAY); if (error != CODEC_ERROR_OKAY) { #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Error in band %d trailer: %d\n", band, error); } #endif return false; } return result; } // Decode an empty band bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int quantization; // Check that the band is in range assert(0 <= band && band <= CODEC_MAX_HIGHBANDS); // Check that the highpass band is 16 bits assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S); #if (0 && DEBUG) //TODO: Change format string to handle 64-bit pointers if (logfile) { fprintf(logfile, "Start decoding an empty band, stream: 0x%p\n", stream->lpCurrentWord); } #endif // Encoded coefficients must start on a word boundary AlignBits(stream); // Copy the scale factors used by the encoder into the wavelet band // (Zero means that the encoder did not supply the parameter) if (codec->band.scale > 0) wavelet->scale[band] = codec->band.scale; // Set the quantization used to encode the band coefficients quantization = codec->band.quantization; wavelet->quantization[band] = quantization; #if (0 && DEBUG) if (logfile) { DumpBits(stream, logfile); } #endif // Decode the band trailer error = DecodeBandTrailer(stream, NULL); decoder->error = error; 
assert(error == CODEC_ERROR_OKAY); if (error != CODEC_ERROR_OKAY) { #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Error in band: %d, error: %d\n", band, error); } #endif return false; } // The encoded band coefficients end on a bitstream word boundary // to avoid interference with the marker for the coefficient band trailer AlignBits(stream); #if (0 && DEBUG) // Dump the band trailer to the logfile if (logfile) { fprintf(logfile, "Band trailer marker: 0x%04X\n", trailer->marker); } #endif #if (0 && DEBUG) if (logfile) { //TODO: Change format string to handle 64-bit pointers fprintf(logfile, "End decode empty band, stream: 0x%X\n", stream->lpCurrentWord); } #endif return true; } bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height) { PIXEL *rowptr = wavelet->band[band_index]; int pitch = wavelet->pitch; int row,dequant = wavelet->quantization[band_index]; // Convert the pitch from bytes to pixels pitch /= sizeof(PIXEL); //BAND_ENCODING_16BIT if(dequant == 1) { for (row = 0; row < height; row++) { int column; #if 0 for (column = 0; column < width; column++) { int value = GetWord16s(stream); rowptr[column] = value; } #else // Mild speedup (2.5% overall half-res decode improvement.) 
			// Swap the two bytes of each coefficient while copying from the
			// bitstream buffer into the wavelet band
			char *sptr = (char *)stream->lpCurrentWord;
			char *dptr = (char *)rowptr;
			for (column = 0; column < width; column++)
			{
				*(dptr+1) = *sptr++;
				*dptr = *sptr++;
				dptr+=2;
			}

			// Advance the bitstream past the copied coefficients
			stream->lpCurrentWord += width*2;
			stream->nWordsUsed += width*2;
#endif
			rowptr += pitch;
		}
	}
	else
	{
		// Read each coefficient and multiply by the dequantization factor
		for (row = 0; row < height; row++)
		{
			int column;
			for (column = 0; column < width; column++)
			{
				int value = GetWord16s(stream);
				rowptr[column] = value*dequant;
			}
			rowptr += pitch;
		}
	}

#if (0 && DEBUG)
	{
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			sprintf(label, "Hightemp-decode-%d-", count);
			DumpBandPGM(label, wavelet, band_index, NULL);
		}
		count++;
	}
#endif

	return true;
}

// Decode a losslessly-encoded band using the finite state machine decoder,
// then apply the dequantization factor in place.  Returns false (with
// decoder->error set to CODEC_ERROR_RUN_DECODE) on any validation or decode failure.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	int result = true;
	int quant = wavelet->quantization[band_index];

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];

	int size;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;

	//CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int num_subbands = codec->num_subbands;
	//int pixel_type = wavelet->pixel_type[band_index];
	//int difference_coding = decoder->codec.difference_coding;
	//int localquant = 1;
	//int threading = 0;

	decoder->codec.active_codebook = 0; // reset CODEC state
	decoder->codec.difference_coding = 0; //reset state for next subband

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (! (wavelet != NULL)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	//Must have a valid FSM
	assert(fsm != NULL);
	if (! (fsm != NULL)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// All rows are treated as one int32_t row that covers the entire band
	size = fsm->table.num_states;
	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check that the band holds 16-bit signed pixels
	assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

	rowptr = (PIXEL *)wavelet->band[band_index];
	pitch = wavelet->pitch;
	assert(rowptr != NULL && pitch != 0);
	if (! (rowptr != NULL && pitch != 0)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	DeQuantFSM(fsm, 1); // can't use the FSM to dequantize here because the coefficients are split into high and low bytes

	if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant))
	{
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Apply the dequantization factor in place (shift for the common power of two)
	if(quant)
	{
		int x,y;
		PIXEL *line = rowptr;

		if(quant == 32)
		{
			for(y=0;y<height;y++)
			{
				for(x=0;x<width;x++)
				{
					line[x] <<= 5;
				}
				line += pitch/2;	// pitch is in bytes; PIXEL entries are two bytes
			}
		}
		else
		{
			for(y=0;y<height;y++)
			{
				for(x=0;x<width;x++)
				{
					line[x] *= quant;
				}
				line += pitch/2;
			}
		}
	}

/*	if(once <= 60)
	{
		char name[200];
		FILE *fp;
		sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once);
		fp = fopen(name,"wb");
		fwrite(rowptr,width*height,1,fp);
		fclose(fp);
		once++;
	}*/

	assert(result == true);
	if (! (result == true)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	return true;
}

// Invert the wavelet to reconstruct the lower wavelet in the transform
//
// Dispatches on the transform type and wavelet index to reconstruct the lowpass
// (or temporal highpass) band of the next-lower wavelet in the pyramid.  When
// allocations_only is nonzero only the destination wavelets are (re)allocated and
// no inverse transform is performed.
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel, IMAGE *wavelet,
							int index, int precision, const SCRATCH *scratch, int allocations_only)
{
	int transform_type = transform->type;
	int width = wavelet->width;
	int height = wavelet->height;
	int level = wavelet->level;
	PIXEL *buffer = (PIXEL *)scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Is the current wavelet a spatial wavelet?
	if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
	{
		// Reconstruct the lowpass band in the lower wavelet
		int lowpass_index = index - 1;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
		//const int prescale = 1;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		// Check that the lowpass band has not already been reconstructed
		//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
		if(!allocations_only)
		{
			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Has this wavelet already been reconstructed?
			if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
			{
				// Perform the inverse spatial transform before decoding the next wavelet
				STOP(tk_decoding);
				START(tk_inverse);
				//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
				TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
				STOP(tk_inverse);
				START(tk_decoding);

				// Call thread safe routine to update the band valid flags
				UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
				// Increment the count of spatial transforms performed during decoding
				spatial_decoding_count++;
#endif
			}
		}
	}
	// Is the current wavelet a spatial wavelet above the temporal lowpass band?
	else if (index > 3)
	{
		// Reconstruct the lowpass band in the lower wavelet
		const int temporal_wavelet_index = 2;
		int lowpass_index = (index > 4) ? index - 1 : index - 2;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = ((lowpass_index == temporal_wavelet_index) ?
							WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		//const int prescale = 2;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		if(!allocations_only)
		{
			// Check that the lowpass band has not already been reconstructed
			assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
			TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the spatial wavelet above the temporal highpass band?
	else if (index == 3)
	{
		// Reconstruct the highpass band in the temporal wavelet
		const int temporal_wavelet_index = 2;
		int highpass_index = index - 1;
		IMAGE *highpass = transform->wavelet[highpass_index];
		int highpass_width = 2 * width;
		int highpass_height = 2 * height;
		int highpass_level = level - 1;
		int highpass_type = ((highpass_index == temporal_wavelet_index) ?
							 WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = inverse_prescale ? transform->prescale[index] : 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
										highpass_width, highpass_height,
										highpass_level, highpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
		highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
		transform->wavelet[highpass_index] = highpass;
#endif

		if(!allocations_only)
		{
			// Check that the highpass band has not already been reconstructed
			assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(wavelet));

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, highpass, 1);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the temporal wavelet?
	else if (index == 2)
	{
		// Get the temporal wavelet
		IMAGE *temporal = wavelet;

		// Set the frame wavelet parameters
		int frame_level = 1;
		int frame_type = WAVELET_TYPE_FRAME;

		// Get the two frame wavelets
		IMAGE *frame[2];
		frame[0] = transform->wavelet[0];
		frame[1] = transform->wavelet[1];

		// Check that the temporal wavelet is valid
		assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);

#if _THREADED_DECODER
		// Allocate (or reallocate) the frame wavelets with thread safety
		frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
		frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
		// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
		frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
		frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
		transform->wavelet[0] = frame[0];
		transform->wavelet[1] = frame[1];
#endif

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Before inverse temporal transform");
			DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
			DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
		}
#endif

		if(!allocations_only)
		{
			// Check that the lowpass bands have not already been reconstructed
			assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
			assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(temporal));

			// Invert the temporal transform between the frame wavelets
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
			STOP(tk_inverse);
			START(tk_decoding);

#if (0 && DEBUG)
			if (logfile) {
				IMAGE *wavelet = quad[0];
				fprintf(logfile, "After inverse temporal transform\n");
				DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
			}
#endif

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, frame[0], 0);
			UpdateWaveletBandValidFlags(decoder, frame[1], 0);

#if TIMING
			// Increment the number of temporal transforms performed outside of decoding
			temporal_decoding_count++;
#endif
		}
	}
}

// Compute the dimensions of the output buffer
//
// Determines the decoded frame dimensions for the requested resolution by scaling
// the dimensions of the appropriate wavelet in the first channel's transform.
// Both output pointers must be non-NULL; they are zeroed before computation so the
// caller sees zeros if this routine terminates early.
void ComputeOutputDimensions(DECODER *decoder, int frame,
							 int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	IMAGE *wavelet = NULL;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	int decoded_scale = 0;

	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Get the decoding scale
	switch(resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
		{
#if DEBUG
			assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[0];
		}
		else
		{
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[3];
		}
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		if(wavelet == NULL) // therefore Intra Frame compressed
			wavelet = transform_array[0]->wavelet[2];
		break;

	default:
		assert(0);
		break;
	}

	// Get the decoded frame dimensions
	assert(wavelet != NULL);
	wavelet_width = wavelet->width;
	wavelet_height = wavelet->height;

	if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		decoded_width = wavelet_width;
	else
		decoded_width = decoded_scale * wavelet_width;
	decoded_height = decoded_scale * wavelet_height;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n",
			decoded_scale, decoded_width, wavelet_width);
	}
#endif

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
			decoded_width, decoded_height, info->width, info->height, pitch);
	}
#endif

	// Return the decoded width and height
	*decoded_width_out = decoded_width;
	*decoded_height_out = decoded_height;
}

#define DEBUG_ROW16U 0

// Reconstruct one frame of the group into the caller's output buffer.
// NOTE(review): this routine continues past the end of this chunk.
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	FRAME_INFO local_info;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &local_info;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	IMAGE *wavelet;
	int
wavelet_width; int wavelet_height; int decoded_width; int decoded_height; int resolution = decoder->frame.resolution; int chroma_offset = decoder->codec.chroma_offset; int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; //TODO: Change this routine to return the codec error code CODEC_ERROR error = CODEC_ERROR_OKAY; //if(decoder->cfhddata.calibration) // LoadTweak(); //TODO: Change this routine to return an error code if (decoder == NULL) { return; } decoder->gop_frame_num = frame; #if _THREADED_DECODER // Wait until the transform thread has finished all pending transforms WaitForTransformThread(decoder); #endif //return; // copy frame info in a changable local structure memcpy(info, &decoder->frame, sizeof(FRAME_INFO)); // Use the old code for reconstructing the frame #if (0 && DEBUG) // Force quarter resolution decoding for debugging that feature resolution = DECODED_RESOLUTION_QUARTER; #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame); } #endif // The decoder can decode a video sample without returning a frame if (output == NULL || pitch == 0) return; #if (1 && DEBUG_ROW16U) // Force decoding to 16-bit pixels for debugging info->format = DECODED_FORMAT_YR16; #endif #if 0 if (info->format == DECODED_FORMAT_YR16) { // Force interlaced or progressive decoding for debugging //progressive = false; progressive = true; } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags); } #endif // Does this frame have to be reconstructed? 
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) { #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder discarding frame: %d\n", frame); } #endif return; } // Check that the requested frame is within the limits of the group of frames assert(0 <= frame && frame < decoder->gop_length); // Check that the frame resolution is valid assert(IsValidFrameResolution(resolution)); if (!IsValidFrameResolution(resolution)) { decoder->error = CODEC_ERROR_RESOLUTION; return; } #if (0 && TIMING) //(0 && DEBUG) // Override progressive flag read from the bitstream for debugging //progressive = 0; // Use the inverse frame transform progressive = 1; // Use the inverse spatial transform #endif // Build the 3D LUTs if needed ComputeCube(decoder); //HACK DAN20110131 -- some formats will not directly decode so need to use the AM route { if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 && resolution == DECODED_RESOLUTION_HALF) { if( decoder->frame.format == COLOR_FORMAT_R408 || decoder->frame.format == COLOR_FORMAT_V408) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } if( decoder->frame.format == COLOR_FORMAT_NV12) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; // TODO, make it work with this. 
} if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } // Get the decoding scale if(!uncompressed) { switch(resolution) { case DECODED_RESOLUTION_FULL: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: #if DEBUG assert(AllTransformBandsValid(transform_array, num_channels, frame)); #endif wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = 2 * wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_HALF: #if DEBUG assert(AllLowpassBandsValid(transform_array, num_channels, frame)); #endif wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_HALF_HORIZONTAL: #if DEBUG assert(AllLowpassBandsValid(transform_array, num_channels, frame)); #endif wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_QUARTER: if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { #if DEBUG assert(AllLowpassBandsValid(transform_array, num_channels, frame)); #endif wavelet = transform_array[0]->wavelet[0]; } else { wavelet = transform_array[0]->wavelet[3]; } // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_LOWPASS_ONLY: wavelet = transform_array[0]->wavelet[5]; if(wavelet == NULL) // there Intra Frame compressed 
wavelet = transform_array[0]->wavelet[2]; // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; default: assert(0); break; } } else { if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { decoded_width = info->width/2; decoded_height = info->height/2; } else { decoded_width = info->width; decoded_height = info->height; } } if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { if(resolution == DECODED_RESOLUTION_FULL) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { if(decoded_width*2 == info->width) { info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER; } } else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER; } } else { if(resolution == DECODED_RESOLUTION_HALF) { if(decoded_width*2 == info->width) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL; } } else if(resolution == DECODED_RESOLUTION_QUARTER) { if(uncompressed) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED; } else { if(decoded_width == info->width) { info->resolution = resolution = DECODED_RESOLUTION_HALF; } } } } } if(uncompressed) { // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented 
assert(0); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210) error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0) error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } else { // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4 case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4 error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented assert(0); //error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 // Add new code here for the final steps in decoding the original YUV 4:2:2 format error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch); break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } // Was the newer code able to successfully reconstruct the frame? 
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT) { // Save the codec error code in the decoder state and return decoder->error = error; return; } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n", decoded_width, decoded_height, info->width, info->height, pitch); } #endif #if (0 && DEBUG) if (logfile) { IMAGE *wavelet = transform[0]->wavelet[frame]; int band = 0; fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band); DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile); } #endif // Check that the requested frame is large enough to hold the decoded frame #if (0 && DEBUG) //if (! (info->width >= decoded_width)) { if (logfile) { //fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width); fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width); } } #endif assert(info->width >= decoded_width); assert((info->height+7)/8 >= (decoded_height+7)/8); if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) { decoder->error = CODEC_ERROR_FRAMESIZE; return; } #if (0 && DEBUG) if (logfile) { //SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16); SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16); // Adjust the subimage to be at the middle of the right border //subimage.row += wavelet_height/2 - 8; DumpBand("SIF Image", wavelet, 0, &subimage, logfile); } #endif START(tk_inverse); if (resolution == DECODED_RESOLUTION_QUARTER) { int precision = codec->precision; // Reconstruct the frame to quarter resolution ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch, info, &decoder->scratch, precision); } else // Was the first transform a frame 
transform (used for interlaced frames)? if (!progressive) { // Can the inverse frame transform and output byte packing be done in one pass? if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY)) { // Apply the inverse frame transform and pack the results into the output buffer int precision = codec->precision; #if (0 && DEBUG) DumpWaveletBandsPGM(wavelet, frame, num_channels); #endif #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; int precision = codec->precision; #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels, (PIXEL16U *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int scale = 
13; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { int precision = codec->precision; TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); } } } else // The first transform was a spatial transform (used for progressive frames) { // Can the inverse frame transform and output byte packing be done in one pass? 
if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { uint8_t *pixoutput = output; if(decoder->use_active_metadata_decoder) //WIP { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToBayerYUV); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2YUV); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToYUV); } #else //TODO : Accelerated BAYER for single thread decoding. 
assert(0); // Transform the wavelets for each channel to the output image (not threaded) //TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info, // &decoder->scratch, chroma_offset, precision); #endif } else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER && (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED { uint8_t *pixoutput = output; if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) { pixoutput += (info->height-1)*pitch; pitch = -pitch; } TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma int precision = codec->precision; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { //int precision = codec->precision; int scale = 13; //DAN20081203 -- fix for 444 decodes in AE32-bit float 
decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; char *format = decoded_format_string[info->format]; sprintf(label, "Output, channel: %d, format: %s", channel, format); DumpImageStatistics(label, lowpass_images[channel], logfile); } #endif } STOP(tk_inverse); #if 1 //|| BAYER_SUPPORT if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process 
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[4096*3],*sptr; //unsigned short scanline2[4096*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; IMAGE *gd_image = lowpass_images[3]; uint8_t *outyuv,*line = output; PIXEL *bayer_line, *bayerptr; PIXEL *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; int format = info->format; bool inverted = false; int maxbound = 4095; //10-bit source int midpoint = 32768>>3; int shift = 4; if(precision == 12) { maxbound = 16383; midpoint = 32768>>1; shift = 2; } if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32) { inverted = true; line += (info->height-1)*pitch; pitch = -pitch; } scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; for(y=0; y<info->height; y++) { uint8_t *newline = line; PIXEL *newG=G,*newRG=RG,*newBG=BG; PIXEL *gptr,*rgptr,*bgptr,*gdptr; int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; newline += pitch*y; newG += y * (g_image->pitch / sizeof(PIXEL)); newRG += y * (rg_image->pitch / sizeof(PIXEL)); newBG += y * (bg_image->pitch / sizeof(PIXEL)); gptr = newG; rgptr = newRG; bgptr = newBG; sptr = scanline; for(x=0; x<info->width; x++) { g = (*gptr++); if(g > maxbound) g = maxbound; rg = (*rgptr++); bg = (*bgptr++); r = (rg<<1) - midpoint + g; b = (bg<<1) - midpoint + g; if(r > maxbound) r = maxbound; if(b > 
maxbound) b = maxbound; if(r < 0) r = 0; if(g < 0) g = 0; if(b < 0) b = 0; *sptr++ = r<<shift; *sptr++ = g<<shift; *sptr++ = b<<shift; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, newline, y, pitch, info->format, whitebitdepth, flags); } } #endif } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; uint8_t *line = output; unsigned char *rgb8; PIXEL *G,*RG,*BG; int x,y; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; if(info->format == DECODED_FORMAT_RGB32) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = ((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; *rgb8++ = 255; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } else if(info->format == DECODED_FORMAT_RGB24) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = 
((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } } else #endif { CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); } START(tk_inverse); #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int width = info->width; int height = info->height; sprintf(label, "Output"); DumpBufferStatistics(label, output, width, height, pitch, logfile); } #endif } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { // Handle inversion of the output image in this routine FRAME_INFO info2; int format; bool inverted = false; int precision = codec->precision; memcpy(&info2, info, sizeof(FRAME_INFO)); format = info2.format; if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; info2.format = format; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; info2.format = format; inverted = true; } // Have the output location and pitch been inverted? if (inverted && pitch > 0) { int height = info->height; if(resolution == DECODED_RESOLUTION_FULL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } //#if BUILD_PROSPECT // Output the frame in V210 foramt? 
if( (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64) && decoder->codec.encoded_format != ENCODED_FORMAT_BAYER ) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // The output buffer is an array of 10-bit pixels packed into double words #if 0 TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, buffer, buffer_size, chroma_offset, decoder->codec.precision); #else TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } else //#endif // Decoding a full resolution progressive frame to a Bayer output format? if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL); } //TODO: Replace this memory allocation with a scratch buffer allocation //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*3*sizeof(PIXEL); decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16); #endif decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL); } //#endif if(decoder->RawBayer16 == NULL || 
decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } if(decoder->RawBayer16) { uint8_t *line; PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16; PIXEL16U *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; //float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; #if 0 static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5f} }; float mtrx[3][4] = { {1.0f, 0, 0, 0}, {0, 1.0f, 0, 0}, {0, 0, 1.0f, 0} }; float whitebalance[3] = { 1.0f, 1.0f, 1.0f }; #endif #if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions /* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2) { float fval = 0.0; int i; for(i=0; i<12; i++) { mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3]; if((i>>2) == (i&3)) { if(fval != 1.0) { matrix_non_unity = 1; } } else { if(fval != 0.0) { matrix_non_unity = 1; } } } // not active as VFW isn't yet support the 3D LUTs if(decoder->cfhddata.version >= 5) { int j; float encode_curvebase = 90.0; float decode_curvebase = 90.0; int encode_curve_type = decoder->cfhddata.encode_curve >> 16; int decode_curve_type = decoder->cfhddata.decode_curve >> 16; if(decoder->cfhddata.user_white_balance[0] > 0.0) { wb_non_unity = 1; whitebalance[0] = decoder->cfhddata.user_white_balance[0]; whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0; whitebalance[2] = decoder->cfhddata.user_white_balance[3]; } if(encode_curve_type) //1 or 2 encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); else { encode_curve_type = 1; encode_curvebase = 90.0; } if(decode_curve_type) //1 or 2 decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / 
(float)(decoder->cfhddata.decode_curve & 0xff); else { decode_curve_type = 1; decode_curvebase = 90.0; } for(j=0; j<2048; j++) { if(encode_curve_type == 1) curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase); else curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase); } for(j=-512; j<=2048; j++) // -1 to +4 { if(encode_curve_type == CURVE_TYPE_LOG) lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase); else lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase); } } }*/ #endif #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, chroma_offset, precision); #else // Decode that last transform to rows of Bayer data (one row per channel) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, &decoder->scratch, chroma_offset, precision); #endif if(resolution == DECODED_RESOLUTION_FULL_DEBAYER && (info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4)) { #if _THREADED //DemosaicRAW WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else 
assert(0) // old code disabled /* int bayer_format = decoder->cfhddata.bayer_format; unsigned char *outA8, *outB8; unsigned short *lineStartA16, *lineStartB16; unsigned short *lineA16, *lineB16; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++) { bayer_line = decoder->RawBayer16; bayer_line += bayer_pitch * y; if(y<info->height) { ColorDifference2Bayer(info->width, bayer_line, bayer_pitch, bayer_format); } if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline { unsigned short *delayptr = decoder->RawBayer16; delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES); BayerRippleFilter(info->width, delayptr, bayer_pitch, bayer_format, decoder->RawBayer16); } if(y>=DEMOSAIC_DELAYLINES) { int delay_y = y - DEMOSAIC_DELAYLINES; unsigned short *sptr, scanline[8192*3]; outA8 = line; line += pitch; outB8 = line; line += pitch; sptr = scanline; DebayerLine(info->width*2, info->height*2, delay_y*2, decoder->RawBayer16, bayer_format, sptr, sharpening); for(x=0; x<info->width*2; x++) { outA8[2] = *sptr++>>8; outA8[1] = *sptr++>>8; outA8[0] = *sptr++>>8; outA8+=3; } for(x=0; x<info->width*2; x++) { outB8[2] = *sptr++>>8; outB8[1] = *sptr++>>8; outB8[0] = *sptr++>>8; outB8+=3; } } }*/ #endif // _THREADED } else if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the 
work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else assert(0) // old code disabled /* { int bayer_format = decoder->cfhddata.bayer_format; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) // stats1+=g1; // stats2+=g2; // statsd+=gd; if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { int bayer_format = decoder->cfhddata.bayer_format; for(y=2; y<info->height-3; y++) { int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line 
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = 
outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; // avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } }*/ #endif } // Pack the rows of Bayer data (full resolution progressive) into BYR3 format? else if (format == DECODED_FORMAT_BYR3) { PIXEL16U *outR, *outG1, *outG2, *outB; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; // #pragma omp parallel for for(y=0; y<info->height; y++) { uint8_t *line = output; PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16; line += pitch*2*y; bayerptr += bayer_pitch * y; outR = (PIXEL16U *)line; outG1 = outR + (pitch/4); outG2 = outR + (pitch/4)*2; outB = outR + (pitch/4)*3; G = (PIXEL16U *)bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; // Pack the rows of Bayer components into the BYR3 pattern #if (1 && XMMOPT) { __m128i *G_128 = (__m128i *)G; __m128i *RG_128 = (__m128i *)RG; __m128i *BG_128 = (__m128i *)BG; __m128i *GD_128 = (__m128i *)GD; __m128i *outR_128 = (__m128i *)outR; __m128i *outG1_128 = (__m128i *)outG1; __m128i *outG2_128 = (__m128i *)outG2; __m128i *outB_128 = (__m128i *)outB; __m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff); __m128i midpoint1 = _mm_set1_epi16(32768>>6); __m128i midpoint2 = _mm_set1_epi16(32768>>5); int column_step = 8; int post_column = (info->width) - ((info->width) % 
column_step); for (x=0; x < post_column; x += column_step) { __m128i r_128; __m128i g1_128; __m128i g2_128; __m128i b_128; __m128i g_128; __m128i rg_128; __m128i bg_128; __m128i gd_128; g_128 = _mm_load_si128(G_128++); rg_128 = _mm_load_si128(RG_128++); bg_128 = _mm_load_si128(BG_128++); gd_128 = _mm_load_si128(GD_128++); g_128 = _mm_srli_epi16(g_128, 6); rg_128 = _mm_srli_epi16(rg_128, 5); bg_128 = _mm_srli_epi16(bg_128, 5); gd_128 = _mm_srli_epi16(gd_128, 6); gd_128 = _mm_subs_epi16(gd_128, midpoint1); rg_128 = _mm_subs_epi16(rg_128, midpoint2); bg_128 = _mm_subs_epi16(bg_128, midpoint2); r_128 = _mm_adds_epi16(rg_128, g_128); b_128 = _mm_adds_epi16(bg_128, g_128); g1_128 = _mm_adds_epi16(g_128, gd_128); g2_128 = _mm_subs_epi16(g_128, gd_128); r_128 = _mm_adds_epi16(r_128, limiter); r_128 = _mm_subs_epu16(r_128, limiter); g1_128 = _mm_adds_epi16(g1_128, limiter); g1_128 = _mm_subs_epu16(g1_128, limiter); g2_128 = _mm_adds_epi16(g2_128, limiter); g2_128 = _mm_subs_epu16(g2_128, limiter); b_128 = _mm_adds_epi16(b_128, limiter); b_128 = _mm_subs_epu16(b_128, limiter); _mm_store_si128(outR_128++, r_128); _mm_store_si128(outG1_128++, g1_128); _mm_store_si128(outG2_128++, g2_128); _mm_store_si128(outB_128++, b_128); } G = (PIXEL16U *)G_128; RG = (PIXEL16U *)RG_128; BG = (PIXEL16U *)BG_128; GD = (PIXEL16U *)GD_128; outR = (PIXEL16U *)outR_128; outG1 = (PIXEL16U *)outG1_128; outG2 = (PIXEL16U *)outG2_128; outB = (PIXEL16U *)outB_128; } #endif for(; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; //Red-grn phase *outR++ = r>>6; *outG1++ = g1>>6; *outG2++ = g2>>6; *outB++ = b>>6; } } } // 
Pack the rows of Bayer data (full resolution progressive) into BYR4 format? else if (format == DECODED_FORMAT_BYR4) { int bayer_format = decoder->cfhddata.bayer_format; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { //int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; int32_t r, g, b, rg, bg, gd, g1, g2; // The output of the inverse transform is unsigned 16-bit integers const int midpoint = 32768; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - midpoint; r = ((rg - midpoint)<<1) + g; b = ((bg - midpoint)<<1) + g; g1 = g + gd; g2 = g - gd; r = SATURATE_16U(r); g1 = SATURATE_16U(g1); g2 = SATURATE_16U(g2); b = SATURATE_16U(b); // stats1+=g1; // stats2+=g2; // statsd+=gd; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; default: // Unsupported Bayer format assert(0); *outA16++ = 0; *outA16++ = 0; *outB16++ = 0; *outB16++ = 0; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { for(y=2; y<info->height-3; y++) { //int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format 
== BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; 
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } // Linear restore { unsigned short *buff = (unsigned short *)output; //static int pos = 0; for(y=0; y<info->height*2; y++) { for(x=0; x<info->width*2; x++) { float val = (float)buff[y*info->width*2 + x]/65535.0f; float encode_curvebase = 90.0; int encode_curve_type = CURVE_TYPE_LOG; int encode_curve_neg; if((decoder->cfhddata.encode_curve)>>16) //1 or 2 { encode_curve_type = (decoder->cfhddata.encode_curve)>>16; if(encode_curve_type & CURVE_TYPE_EXTENDED) encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases else encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); } if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR) encode_curve_type = CURVE_TYPE_LINEAR; encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE; switch(encode_curve_type & CURVE_TYPE_MASK) { case CURVE_TYPE_LOG: val = CURVE_LOG2LIN(val,encode_curvebase); break; case CURVE_TYPE_GAMMA: val = CURVE_GAM2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINEON: val = CURVE_CINEON2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINE985: val = CURVE_CINE9852LIN(val,encode_curvebase); break; case CURVE_TYPE_PARA: val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)); break; case CURVE_TYPE_CSTYLE: val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)); break; case CURVE_TYPE_SLOG: val = 
CURVE_SLOG2LIN((float)val); break; case CURVE_TYPE_LOGC: val = CURVE_LOGC2LIN((float)val); break; case CURVE_TYPE_LINEAR: default: break; } buff[y*info->width*2 + x] = (int)(val*4095.0); } } } } else { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[8192*3],*sptr; //unsigned short scanline2[8192*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; uint8_t *outyuv,*line = output; PIXEL *bayerptr; int x,y; if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; __m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg; __m128i rrrrrrrr,bbbbbbbb; __m128i mid8192 = _mm_set1_epi16(8192); __m128i mid16384 = _mm_set1_epi16(16384); __m128i mid32768 = _mm_set1_epi16(32768); __m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff); int sse2width = info->width & 0xfff8; 
bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; sptr = scanline; x = 0; for(; x<sse2width; x+=8) { gggggggg = _mm_loadu_si128((__m128i *)G); G+=8; rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8; bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8; ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767 bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767 //limit to 0 to 16383 rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16); rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16); //limit to 0 to 16383 bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16); bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16); rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535 bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535 *sptr++ = _mm_extract_epi16(rrrrrrrr, 0); *sptr++ = _mm_extract_epi16(gggggggg, 0); *sptr++ = _mm_extract_epi16(bbbbbbbb, 0); *sptr++ = _mm_extract_epi16(rrrrrrrr, 1); *sptr++ = _mm_extract_epi16(gggggggg, 1); *sptr++ = _mm_extract_epi16(bbbbbbbb, 1); *sptr++ = _mm_extract_epi16(rrrrrrrr, 2); *sptr++ = _mm_extract_epi16(gggggggg, 2); *sptr++ = _mm_extract_epi16(bbbbbbbb, 2); *sptr++ = _mm_extract_epi16(rrrrrrrr, 3); *sptr++ = _mm_extract_epi16(gggggggg, 3); *sptr++ = _mm_extract_epi16(bbbbbbbb, 3); *sptr++ = _mm_extract_epi16(rrrrrrrr, 4); *sptr++ = _mm_extract_epi16(gggggggg, 4); *sptr++ = _mm_extract_epi16(bbbbbbbb, 4); *sptr++ = 
_mm_extract_epi16(rrrrrrrr, 5); *sptr++ = _mm_extract_epi16(gggggggg, 5); *sptr++ = _mm_extract_epi16(bbbbbbbb, 5); *sptr++ = _mm_extract_epi16(rrrrrrrr, 6); *sptr++ = _mm_extract_epi16(gggggggg, 6); *sptr++ = _mm_extract_epi16(bbbbbbbb, 6); *sptr++ = _mm_extract_epi16(rrrrrrrr, 7); *sptr++ = _mm_extract_epi16(gggggggg, 7); *sptr++ = _mm_extract_epi16(bbbbbbbb, 7); } for(; x<info->width; x++) { g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; if(r < 0) r = 0; if(r > 0xffff) r = 0xffff; if(g < 0) g = 0; if(g > 0xffff) g = 0xffff; if(b < 0) b = 0; if(b > 0xffff) b = 0xffff; *sptr++ = r; *sptr++ = g; *sptr++ = b; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch, info->format, whitebitdepth, flags); } line += pitch; bayer_line += bayer_pitch; } #endif } /* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201 // Pack the rows of Bayer data (full resolution progressive) into BYR2 format? 
else if (format == DECODED_FORMAT_YUYV) { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * scale); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale); r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. 
if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; bayer_line += bayer_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; bayer_line = decoder->RawBayer16; scale = 16384.0; //_mm_empty(); // Clear the mmx register state y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale * 4.0); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * 
scale * 4.0); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale * 4.0); scale = 4096.0; r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. 
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; bayer_line += bayer_pitch; } } else //RGBs { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; r_rmult = (mtrx[0][0]) * scale * whitebalance[0]; r_gmult = (mtrx[0][1]) * scale * whitebalance[1]; r_bmult = (mtrx[0][2]) * scale * whitebalance[2]; r_offset= (mtrx[0][3]) * scale; g_rmult = (mtrx[1][0]) * scale * whitebalance[0]; g_gmult = (mtrx[1][1]) * scale * whitebalance[1]; g_bmult = (mtrx[1][2]) * scale * whitebalance[2]; g_offset= (mtrx[1][3]) * scale; b_rmult = (mtrx[2][0]) * scale * whitebalance[0]; b_gmult = (mtrx[2][1]) * scale * whitebalance[1]; b_bmult = (mtrx[2][2]) * scale * whitebalance[2]; b_offset= (mtrx[2][3]) * scale; for(y=0; y<info->height; y++) { int i,noisearray[32]; outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = RG + bayer_pitch/4; for(i=0; i<32; i++) { noisearray[i] = (rand() & 127); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = 
*bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; // g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO : need on convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO : need on convert back to log/display curve. } else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; //g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO: Need to convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO: Need to convert back to log/display curve. 
} else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } line += pitch; bayer_line += bayer_pitch; } } */ //MEMORY_ALIGNED_FREE(RawBayer16); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { int precision = codec->precision; if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*info->height*num_channels*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL); } //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL); if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) frame_size = info->width*decoded_height*4*4*sizeof(PIXEL); #if _ALLOCATOR { ALLOCATOR *allocator = decoder->allocator; decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16); } #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16); #endif decoder->RGBFilterBufferSize = frame_size; } //#endif if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } //TODO: Replace this memory allocation with a scratch buffer allocation if(decoder->RawBayer16) { uint8_t *outyuv,*line, *source_line; PIXEL16U *bayerptr; PIXEL16U *G,*RG,*BG; int x,y; int src_pitch = info->width*num_channels*sizeof(PIXEL); int y_rmult,y_gmult,y_bmult,y_offset;//shift=8; int u_rmult,u_gmult,u_bmult,u_offset; int 
v_rmult,v_gmult,v_bmult,v_offset; float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5} }; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, src_pitch, info, chroma_offset, precision); #else TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, src_pitch, info, &decoder->scratch, chroma_offset, precision); #endif if (format == DECODED_FORMAT_YUYV) { line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 256.0; y_rmult = (int)((rgb2yuv[0][0])); y_gmult = (int)((rgb2yuv[0][1])); y_bmult = (int)((rgb2yuv[0][2])); y_offset= (int)((rgb2yuv[0][3])); u_rmult = (int)((rgb2yuv[1][0])); u_gmult = (int)((rgb2yuv[1][1])); u_bmult = (int)((rgb2yuv[1][2])); u_offset= (int)((rgb2yuv[1][3])); v_rmult = (int)((rgb2yuv[2][0])); v_gmult = (int)((rgb2yuv[2][1])); v_bmult = (int)((rgb2yuv[2][2])); v_offset= (int)((rgb2yuv[2][3])); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - 
v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; source_line += src_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 16384.0; y_rmult = (int)((rgb2yuv[0][0]) * scale); y_gmult = (int)((rgb2yuv[0][1]) * scale); y_bmult = (int)((rgb2yuv[0][2]) * scale); y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f); u_rmult = (int)((rgb2yuv[1][0]) * scale); u_gmult = (int)((rgb2yuv[1][1]) * scale); u_bmult = (int)((rgb2yuv[1][2]) * scale); u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f); v_rmult = (int)((rgb2yuv[2][0]) * scale); v_gmult = (int)((rgb2yuv[2][1]) * scale); v_bmult = (int)((rgb2yuv[2][2]) * scale); v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f); scale = 4096.0; y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); 
u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; source_line += src_pitch; } } else //RGBs { line = output; source_line = (unsigned char *)decoder->RawBayer16; for(y=0; y<info->height; y++) { int i,noisearray[32]; unsigned short *rgb16 = (unsigned short *)line; outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(i=0; i<32; i++) { noisearray[i] = (rand() & 255); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else if(info->format == DECODED_FORMAT_RGB24) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } else if(info->format == DECODED_FORMAT_RG48) { for(x=0; x<info->width; x++) { int R1,G1,B1; G1 = (*G++); R1 = (*RG++); B1 = (*BG++); *rgb16++ = R1; *rgb16++ = G1; *rgb16++ = B1; } } line += pitch; source_line += src_pitch; } } //MEMORY_ALIGNED_FREE(RawBayer16); } } else // 
Output the frame in one of the RGB 8-bit formats { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; // Invert the bottom wavelet and convert the output to the requested color format #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sYUVtoRGB); #else TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } } } #if TIMING // Count the number of progressive frames that were decoded progressive_decode_count++; #endif } STOP(tk_inverse); #ifdef ADOBE_MEMORY_FUNCTIONS if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) || (decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2)) { #if _ALLOCATOR if(decoder->RawBayer16) { FreeAligned(decoder->allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #else if(decoder->RawBayer16) { MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #endif } #endif #if (0 && DEBUG) if (logfile) { //uint8_t *subimage = output; uint8_t *subimage = output + (2 * info->width) - 16; DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Exit ReconstructFrameToBuffer\n"); } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif } #if 0 // Reconstruct the frame to quarter resolution at full frame rate void ReconstructQuarterFrame(DECODER *decoder, int num_channels, uint8_t *frame1, uint8_t *frame2, int 
output_pitch, FRAME_INFO *info, char *buffer, size_t buffer_size) { TRANSFORM **transform_array = decoder->transform; int output_width = info->width; int output_height = info->height; PIXEL *low_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *high_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *bufptr = (PIXEL *)buffer; uint8_t *output_row_ptr = output; int low_pitch[CODEC_MAX_CHANNELS]; int high_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Check that there is enough space for the intermediate results from each channel assert(output_width * sizeof(PIXEL) < buffer_size); // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet IMAGE *low_wavelet = transform_array[channel]->wavelet[3]; IMAGE *high_wavelet = transform_array[channel]->wavelet[2]; // Get the pointers to the first row in each lowpass band low_row_ptr[channel] = low_wavelet->band[0]; high_row_ptr[channel] = high_wavelet->band[0]; low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL); high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL); // Allocate space for one row of results for this channel channel_row_ptr[channel] = bufptr; bufptr += low_wavelet->width; } for (row = 0; row < output_height; row++) { char *bufptr = buffer; for (channel = 0; channel < num_channels; channel++) { // Invert the temporal transform at quarter resolution InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]); // Advance to the next row in each band for the temporal transform low_row_ptr[channel] += low_pitch[channel]; high_row_ptr[channel] += high_pitch[channel]; } // Pack the intermediate results into the output row ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width); // Advance the output row pointer output_row_ptr += output_pitch; } 
} #else // Reconstruct the frame to quarter resolution at full frame rate void ReconstructQuarterFrame(DECODER *decoder, int num_channels, int frame_index, uint8_t *output, int output_pitch, FRAME_INFO *info, const SCRATCH *scratch, int precision) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif TRANSFORM **transform_array = decoder->transform; int output_width = info->width; int output_height = info->height; PIXEL *low_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *high_row_ptr[CODEC_MAX_CHANNELS]; uint8_t *output_row_ptr = output; int low_pitch[CODEC_MAX_CHANNELS]; int high_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Value used for filling the fourth channel in ARGB output int alpha = 255; int format = COLORFORMAT(info); int color_space = COLORSPACE(info); int decoded_format = DECODEDFORMAT(info); //bool inverted = false; // The pixels are descaled in the inverse temporal transform //const int descale = 0; // Shift the intermediate results to 16-bit pixels const int shift_yu64 = 8; // Push the scratch space state to allocate a new section char *buffer = scratch->free_ptr; #if DEBUG size_t buffer_size = scratch->free_size; #endif // Initialize a pointer for allocating space in the buffer PIXEL *bufptr = (PIXEL *)buffer; // Array of pointers to the start of each channel in the intermediate results PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS]; // Check that there is enough space for the intermediate results from each channel #if DEBUG assert(output_width * sizeof(PIXEL) < buffer_size); #endif ComputeCube(decoder); // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet IMAGE *low_wavelet = transform_array[channel]->wavelet[4]; IMAGE *high_wavelet = transform_array[channel]->wavelet[3]; // Get the pointers to the first row in each lowpass band low_row_ptr[channel] = low_wavelet->band[0]; high_row_ptr[channel] = 
high_wavelet->band[0]; low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL); high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL); // Force the row of intermediate results to be properly aligned bufptr = (PIXEL *)ALIGN16(bufptr); // Allocate space for one row of results for this channel channel_row_ptr[channel] = bufptr; bufptr += low_wavelet->width; // Check that the row of intermediate results is properly aligned assert(ISALIGNED16(channel_row_ptr[channel])); } // Invert the image if required switch (decoded_format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB32: output_row_ptr += (output_height - 1) * output_pitch; output_pitch = NEG(output_pitch); } //HACK: Seems to work, I don't know why. //DAN20070304 if (precision == 12) precision = 8; // Apply the inverse temporal transform to the lowpass and highpass rows for (row = 0; row < output_height; row++) { // Most of the color conversion routines use zero descaling int descale = 0; //char *bufptr = buffer; for (channel = 0; channel < num_channels; channel++) { if (frame_index == 0) { // Invert the temporal transform at quarter resolution to get the even row InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel], output_width, precision); } else { assert(frame_index == 1); // Invert the temporal transform at quarter resolution to get the odd row InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel], output_width, precision); } // Advance to the next row in each band for the temporal transform low_row_ptr[channel] += low_pitch[channel]; high_row_ptr[channel] += high_pitch[channel]; } if(decoder->use_active_metadata_decoder) { uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes int i; FRAME_INFO info2; memcpy(&info2, info, sizeof(FRAME_INFO)); info2.height = 1; for(i=0;i<num_channels;i++) { channeldata[i] = 
(uint8_t *)channel_row_ptr[i]; channelpitch[i] = 0; } #if 1 { __m128i *Y = (__m128i *)channeldata[0]; __m128i *U = (__m128i *)channeldata[1]; __m128i *V = (__m128i *)channeldata[2]; __m128i v; int x; __m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff); for(x=0;x<info->width;x+=8) { v = _mm_load_si128(Y); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(Y++, v); } for(x=0;x<info->width/2;x+=8) { v = _mm_load_si128(U); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(U++, v); } for(x=0;x<info->width/2;x+=8) { v = _mm_load_si128(V); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(V++, v); } } #else //non SSE2 for(x=0;x<info->width*2;x++) { int val = *gptr++; if(val < 0) val = 0; if(val > 4095) val = 4095; val <<= 4; *src++ = val; } src = scanline2; #endif Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch, decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch); } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; // Convert the rows of luma and chroma into the output format switch(format) { case COLOR_FORMAT_YUYV: case COLOR_FORMAT_UYVY: // Pack the intermediate results into the output row if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { assert(0);//need quarter res BAYER To YUV decoder } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // assert(0);//need quarter res RGB To YUV decoder ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 10, info->colorspace, format); } else { 
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format); } break; case COLOR_FORMAT_RGB24: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 10, 0); } else { // Convert the intermediate results into a row of RGB24 ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space); } break; case COLOR_FORMAT_RGB32: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL, output_width, output_row_ptr, output_pitch, info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/); } else { // Convert the intermediate results into a row of RGBA32 ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space, alpha); } break; case COLOR_FORMAT_YU64: case COLOR_FORMAT_V210: // Convert the intermediate results into a row of YU64 ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width, shift_yu64, precision, format); break; case COLOR_FORMAT_B64A: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); } else { ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, COLOR_FORMAT_B64A, color_space); } break; case COLOR_FORMAT_R210: case COLOR_FORMAT_DPX0: case 
#if 0
// Copy the quarter resolution lowpass channels from the spatial transform
// directly into the output buffer (no color-format conversion).
// NOTE(review): disabled code. The live path (ConvertQuarterFrameToBuffer)
// calls CopyQuarterRowToBuffer with an extra trailing format argument, so
// this call site is stale and must be updated before re-enabling.
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
                              uint8_t *output, int output_pitch,
                              FRAME_INFO *info, int precision)
{
	int output_width = info->width;
	int output_height = info->height;

	// Per-channel cursor into the lowpass band and its pitch (in pixels)
	PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr = output;
	int input_pitch[CODEC_MAX_CHANNELS];

	int channel;
	int row;

	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass band from the quarter resolution wavelet
		// (wavelet index 1 holds the quarter-resolution lowpass data,
		// matching ConvertQuarterFrameToBuffer below)
		IMAGE *wavelet = transform_array[channel]->wavelet[1];

		// Get the pointer to the first row in the lowpass band
		input_row_ptr[channel] = wavelet->band[0];

		// Convert the pitch from bytes to pixels
		input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
	}

	for (row = 0; row < output_height; row++)
	{
		// Descale and pack the pixels in each output row
		CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);

		// Advance the input row pointers
		for (channel = 0; channel < num_channels; channel++)
		{
			input_row_ptr[channel] += input_pitch[channel];
		}

		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#endif
ComputeCube(decoder); //HACK DAN20110122 -- some formats will not directly decode so need to use the AM route { if( format == COLOR_FORMAT_YU64 || format == COLOR_FORMAT_V210 || format == COLOR_FORMAT_R408 || format == COLOR_FORMAT_V408) { if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } } if(decoder->use_active_metadata_decoder) { #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_row_ptr; mailbox->pitch = output_pitch; mailbox->framenum = 0; for(channel = 0; channel < num_channels; channel++) { mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel]; mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL); } memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 1; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; } #endif } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; // Convert each row to the specified output format for (row = 0; row < output_height; row++) { // Right shift for converting lowpass coefficients to pixels int 
descale = 4; switch(format & 0x7fffffff) { case COLOR_FORMAT_YUYV: case COLOR_FORMAT_UYVY: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // assert(0);//need quarter res RGB To YUV decoder ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 14, info->colorspace, format); } else { // Descale and pack the pixels in each output row CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision, format); } break; case COLOR_FORMAT_RGB24: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 14, 0); } else { // Convert the intermediate results into a row of RGB24 ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space); } break; case COLOR_FORMAT_RGB32: case COLOR_FORMAT_RGB32_INVERTED: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3], output_width, output_row_ptr, output_pitch, info->width, 1, 14, 0, num_channels); } else { // Convert the intermediate results into a row of RGBA32 ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space, alpha); } break; case COLOR_FORMAT_YU64: case COLOR_FORMAT_V210: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { //TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122 // } else { // Convert the 
// Release all resources allocated by the decoder.
// NOTE(review): the transform array and num_transforms parameters are unused
// in this body; ClearDecoder appears to own all teardown -- confirm callers
// do not expect the transforms to be freed here.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
	// Disabled timing report (the "&& 0" keeps this block compiled out)
	FILE *logfile = decoder->logfile;
	uint32_t frame_count = decoder->frame_count;
	if (logfile != NULL && frame_count > 0)\
	{
#ifdef _WIN32
		PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
		PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
	}
#endif
	// Free the data structures allocated for decoding
	ClearDecoder(decoder);
}
// Set the dimensions, pixel format, and resolution used for decoding.
// WP13 and W13A are 13-bit white point formats; everything else decodes
// with a 16-bit white point.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
	// Record the output frame dimensions
	decoder->frame.width = width;
	decoder->frame.height = height;

	// The output format and the working format are the same in every case
	decoder->frame.output_format = format;
	decoder->frame.format = format;

	// Select the white point for the pixel format
	if (format == DECODED_FORMAT_WP13 || format == DECODED_FORMAT_W13A) {
		decoder->frame.white_point = 13;
	}
	else {
		decoder->frame.white_point = 16;
	}

	decoder->frame.resolution = resolution;

	// Derive the pixel size from the working format
	decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
limit_cpus = 0; for (i = 0; i < max_cpu_count; i++) { if (decoder->thread_cntrl.affinity & (1<<i)) { limit_cpus++; } } } // Set the number of processors processor_count = GetProcessorCount(); if(processor_count > limit_cpus) processor_count = limit_cpus; #if (0 && DEBUG) // Set the number of processors (for debugging) //processor_count = 8; processor_count = 1; fprintf(stderr, "Limit processors to %d\n", processor_count); #endif decoder->thread_cntrl.capabilities |= (processor_count << 16); } int GetDecoderCapabilities(DECODER *decoder) { return decoder->thread_cntrl.capabilities; } bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags) { if (/*MIN_DECODED_COLOR_SPACE <= color_flags && */color_flags <= MAX_DECODED_COLOR_SPACE) { decoder->frame.colorspace = color_flags; // Indicate that the color flags were set as specified return true; } // The specified color flags were not valid return false; } // Compute the resolution corresponding to the specified combination of input and output dimensions int DecodedResolution(int input_width, int input_height, int output_width, int output_height) { int decoded_width; int decoded_height; // Output height can be negative for inverted RGB output_height = abs(output_height); if (output_width == input_width && output_height == input_height) { return DECODED_RESOLUTION_FULL; } // Compute the dimensions for half resolution decoding decoded_width = input_width / 2; decoded_height = input_height / 2; // Do the output dimensions correspond to half resolution decoding? if (output_width == decoded_width && output_height == decoded_height) { return DECODED_RESOLUTION_HALF; } // Compute the dimensions for quarter resolution decoding decoded_width /= 2; decoded_height /= 2; // Do the output dimensions correspond to half resolution decoding? 
if (output_width == decoded_width && output_height == decoded_height) { return DECODED_RESOLUTION_QUARTER; } return DECODED_RESOLUTION_UNSUPPORTED; } // Compute the decoded resolution that is closest to the output dimensions int DecodedScale(int input_width, int input_height, int output_width, int output_height) { int decoded_width = input_width; int decoded_height = input_height; static int decodedResolution[] = { DECODED_RESOLUTION_FULL, DECODED_RESOLUTION_HALF, DECODED_RESOLUTION_QUARTER }; int reduction = 0; int max_reduction = 2; // Output height can be negative for inverted RGB output_height = abs(output_height); #if 1 // Always decode to the next larger size while (decoded_width > output_width && decoded_height > output_height && reduction < max_reduction) { // Decode to a frame size that is larger than the output image int reduced_width = decoded_width / 2; int reduced_height = decoded_height / 2; if (reduced_width >= output_width && reduced_height >= output_height) { decoded_width = reduced_width; decoded_height = reduced_height; reduction++; } else { break; } } #else while (decoded_width*4 > output_width*5 && decoded_height*4 > output_height*5 && reduction < max_reduction) { #if 0 // Decode to a frame size that is larger than the output image int reduced_width = decoded_width / 2; int reduced_height = decoded_height / 2; if (reduced_width >= output_width && reduced_height >= output_height) { decoded_width = reduced_width; decoded_height = reduced_height; reduction++; } else { break; } #else // Better to scale up a smaller image than scale down a larger image decoded_width /= 2; decoded_height /= 2; reduction++; #endif } #endif // Check that the decoded resolution is valid assert(0 <= reduction && reduction <= max_reduction); return decodedResolution[reduction]; } void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution, int *decoded_width_out, int *decoded_height_out) { switch (decoded_resolution) { default: assert(0); 
case DECODED_RESOLUTION_FULL: *decoded_width_out = encoded_width; *decoded_height_out = encoded_height; break; case DECODED_RESOLUTION_HALF: *decoded_width_out = encoded_width / 2; *decoded_height_out = encoded_height / 2; break; case DECODED_RESOLUTION_QUARTER: *decoded_width_out = encoded_width / 4; *decoded_height_out = encoded_height / 4; break; case DECODED_RESOLUTION_LOWPASS_ONLY: //TODO: Check that the lowpass dimensions are correct *decoded_width_out = encoded_width / 8; *decoded_height_out = encoded_height / 8; break; } } // Return true if the specified resolution is supported bool IsDecodedResolution(int resolution) { if (resolution == DECODED_RESOLUTION_QUARTER) { return true; } return (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF); } // Return true if the encoded sample is a key frame bool IsSampleKeyFrame(uint8_t *sample, size_t size) { bool key_frame_flag = false; // Search the first twenty tags for the sample type const int num_tags = 20; int i; BITSTREAM bitstream; InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ); for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE)) { TAGVALUE segment = GetSegment(&bitstream); if (segment.tuple.tag == CODEC_TAG_SAMPLE) { switch (segment.tuple.value) { case SAMPLE_TYPE_GROUP: case SAMPLE_TYPE_FIRST: case SAMPLE_TYPE_IFRAME: key_frame_flag = true; break; case SAMPLE_TYPE_SEQUENCE_HEADER: case SAMPLE_TYPE_FRAME: case SAMPLE_TYPE_SECOND: case SAMPLE_TYPE_PFRAME: default: key_frame_flag = false; break; case SAMPLE_TYPE_GROUP_TRAILER: case SAMPLE_TYPE_NONE: case SAMPLE_TYPE_ERROR: case SAMPLE_TYPE_CHANNEL: assert(0); // Unexpected situation key_frame_flag = false; // Report the sample as a non-key frame break; } break; // Found the sample type } } return key_frame_flag; } // Return the number of the more recent decoded frame uint32_t DecodedFrameNumber(DECODER *decoder) { CODEC_STATE *codec = &decoder->codec; if (decoder == NULL) return 0; return 
codec->frame_number; } /***** Start of the new code for the finite state machine (FSM) decoder *****/ #if _PROCESSOR_DISPATCH __declspec(cpu_dispatch(Pentium_4,Generic)) static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Stub routine for processor specific dispatch } #endif #if _PROCESSOR_GENERIC #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Generic)) #endif // This version assumes that the row is a multiple of 8 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary //assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 8 byte blocks assert(ISALIGNED(length, 8)); // Convert the length from pixels to 8-byte blocks count = (length >> 3); // This code assumes that at least one 8-byte block will be zeroed assert(count > 0); __asm { pxor mm0, mm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 8-byte blocks loop: movq [eax], mm0 // Write 8 bytes of zeros add eax, 8 // Advance to the next 8 byte block sub ebx, 1 // Decrement the number of blocks jg loop } //_mm_empty(); } #endif #if _PROCESSOR_PENTIUM_4 #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Pentium_4)) #endif #ifndef _WIN64 // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); // Convert the length from pixels to 16-byte blocks count = (length >> 4); // This code assumes that at least one 16-byte block will be zeroed assert(count > 0); #if 1 //DANREMOVE memset(rowptr, 0, length); #else __asm { pxor xmm0, xmm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 16-byte blocks loop: 
movdqa [eax], xmm0 // Write 16 bytes of zeros add eax, 16 // Advance to the next 16 byte block sub ebx, 1 // Decrement the number of blocks jg loop } #endif } #else // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); memset(rowptr, 0, length); } #endif #endif #if (0 && _DEBUG) // Functions for the finite state machine decoder (debug version) static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index) { // Return the address of the next table entry in the finite state machine return &fsm->next_state[index]; } static void ResetFSM(FSM *fsm) { // Reset the state to the beginning of the finite state machine entries fsm->next_state = fsm->entries; } static void UpdateFSM(FSM *fsm, int next) { // Change the state pointer to the next block of table entries fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE); } #else // Macros for the finite state machine decoder #if _INDIVIDUAL_LUT #define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index #define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0] #define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next] #define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index] #define ResetFSMIndividual(fsm) fsm->next_state_index = 0 #define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next #else #define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index #define ResetFSM(fsm) fsm->next_state = fsm->table.entries #define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE) #endif #endif #if _DEBUG static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry) { int pre_skip = (entry->pre_post_skip & 0xFFF); int 
post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = entry->value0 / 32; int value1 = entry->value1 / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); } static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry) { int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = (entry->values >> 16) / 32; int value1 = (entry->values & 0xFFFF) / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); } static void DebugOutputFSM(FSM *fsm) { int num_entries = FSM_INDEX_ENTRIES; int i; for (i = 0; i < num_entries; i++) { FSMENTRY *entry = &fsm->table.entries[0][i]; int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); } } static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile) { int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = entry->value0 / 32; int value1 = entry->value1 / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); if (logfile) { fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip); } } static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile) { int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = (entry->values >> 16) / 32; int value1 = (entry->values & 0xFFFF) / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); if (logfile) { fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip); } } #endif static inline int GetFastByte(BITSTREAM *stream) { // Inline of the third case of GetByte 
uint8_t *lpCurrentWord = stream->lpCurrentWord; // Get the next byte from the bitstream int byte = (uint32_t )(*(lpCurrentWord++)); // Update the state of the bitstream stream->lpCurrentWord = lpCurrentWord; #if ERROR_TOLERANT // Update the count of bytes used stream->nWordsUsed--; #endif // Check that the high bits are zero assert((byte & ~BITMASK(8)) == 0); return byte; } #if 0 static inline int GetFastShort(BITSTREAM *stream) { // Adaptation of the code in GetByte uint8_t *lpCurrentWord = stream->lpCurrentWord; // Get the next byte from the bitstream int byte = (uint32_t )(lpCurrentWord[0]); int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]); // Update the state of the bitstream stream->lpCurrentWord = lpCurrentWord+2; // Check that the high bits are zero assert((word & ~BITMASK(16)) == 0); return word; } #endif // Must declare the byte swap function even though it is an intrinsic //int _bswap(int); #if 0 static inline int GetFastLong(BITSTREAM *stream) { uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord; int word = *(lpCurrentWord)++; //word = _bswap(word); word = SwapInt32BtoN(word); stream->lpCurrentWord = (uint8_t *)lpCurrentWord; return word; } #endif #if 0 //DAN20041030 not used // Decode a subband using FSM. 
One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization) { int index, byte; FSMENTRY *entry; PIXEL *rowptr = image; int column = 0; int32_t value; size_t bytes_row_size = width * sizeof(PIXEL); PIXEL *maxptr; int length = width * sizeof(PIXEL); //ROI roi = {width, 1}; // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL); // Compute the address of the row after the last row in the band maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes length = ALIGN16(length); #if (0 && DEBUG) zerorow_count = 0; #endif ZeroHighPassRow(rowptr, length); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow(rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there is only one decoded magnitude value else if(entry->value1 == 0) { // Undo quantization and scaling value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? 
if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry->value0; rowptr[column++] = SATURATE(value); value = quantization * entry->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry->value0; rowptr[column] = SATURATE(value); value = quantization * entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); column = 0; rowptr[column++] = SATURATE(value); } } // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow(rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { // Undo quantization and scaling int32_t value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry->value0; rowptr[column++] = SATURATE(value); value = quantization * entry->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry->value0; rowptr[column] = SATURATE(value); value = quantization * entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); column = 0; rowptr[column++] = SATURATE(value); } } } } #endif // Decode a subband of highpass coefficients using a finite state machine. // One byte is read from the bitstream each time and decoded in two steps. 
// New version that uses a buffer aligned to the cache for decoding. #if 0 static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines) { // This routine assume that the cache line size is 64 bytes assert(_CACHE_LINE_SIZE == 64); // This routine assumes that the input pointer is aligned to a cache line assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE)); // This routine assumes that at least one cache line will be written assert(numCacheLines > 0); #if __GNUC__ memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE); #else __asm { pxor xmm0, xmm0 // Zero a 16 byte register mov eax, ptrCacheLines // Load the pointer to the memory block mov ebx, numCacheLines // Load the count of the number of cache lines loop: movdqa [eax], xmm0 // Write 64 bytes of zeros using aligned stores movdqa [eax+16], xmm0 movdqa [eax+32], xmm0 movdqa [eax+48], xmm0 add eax, 64 // Advance to the next cache line sub ebx, 1 // Decrement the number of cache lines jg loop } #endif // The routine returns the pointer to the cache line after zeroing the block } #endif #if 0 static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length) { // Note that the length is in units of bytes (not pixels) int count; // Number of 16-byte blocks to copy // Check that the row length is an integer multiple of 16-byte blocks assert(ISALIGNED(length, 16)); // Convert the row length to the number of 16-byte blocks to copy count = length >> 4; // This routine assumes that at least one 16 byte block will be copied assert(count > 0); #if __GNUC__ // Use standard memory copy memcpy(rowptr, buffer, length); #else // Copy a multiple of 16 byte blocks __asm { mov eax, rowptr // Load the pointer to the destination mov ebx, buffer // Load the pointer to the source mov ecx, count // Load the number of 16-byte blocks to copy loop: movdqa xmm0, [ebx] // Load 16 bytes from the source movntdq [eax], xmm0 // Copy 16 bytes to the destination add eax, 16 // Advance to the group of 16 bytes add ebx, 16 
sub ecx, 1 // Decrement the number of blocks to copy jg loop } #endif } #endif // DecodeBandFSMBuffered is no longer used #if 0 //dan20041030 not used bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization, char *decoding_buffer, size_t decoding_buffer_size) { char *rowptr = (char *)image; // Pointer to current row char *maxptr = rowptr + height * pitch; // Address of row after the last row FSMENTRY *entry; int index; int byte; int column = 0; int32_t value; size_t row_size; size_t cache_row_size; // Size of a row in bytes int cache_line_count; // Size of the buffer in cache lines PIXEL *buffer; // Pixel pointer to the buffer int length; // Length of row in bytes // Check that the processing size allows two chunks per byte assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); // The bitstream buffer should be empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Compute the number of cache lines used in the buffer row_size = width * sizeof(PIXEL); cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE); cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT); // Check that the buffer is large enough assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size); // Check that the buffer starts on a cache line boundary assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE)); // This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes length = pitch; assert(length == ALIGN(row_size, 16)); // Cast the buffer pointer for pixel access buffer = (PIXEL *)decoding_buffer; // Zero the decoding buffer ZeroHighPassBuffer(buffer, cache_line_count); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is 
decoded if (entry->value0 == BAND_END_TRAILER) { // Copy the buffer to the row if not already beyond the band if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length); // Advance to the next row rowptr += pitch; // Zero the remaining rows in the subband while (rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } // Reset the finite state machine to the root node in the Huffman tree ResetFSM(fsm); // Return indication that the band was fully decoded return true; } // Set the finite state machine to the next state in the Huffman tree UpdateFSM(fsm, entry->next_state); // No magnitude values decoded? if (entry->value0 == 0) { // No magnitudes decoded so just advance the column pointer column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // Only one magnitude value decoded? else if (entry->value1 == 0) { // Process the magnitude value that was decoded // Undo quantization and scaling value = quantization * entry->value0; // Advance to the column where the value should be placed column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan buffer[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } else // Two magnitude values were decoded { // Check the column before storing values assert(0 <= column && column < width); if (column < width - 1) { // Dequantize and store the first value value = quantization * entry->value0; buffer[column++] = SATURATE(value); // Dequantize and store the second value value = quantization * entry->value1; buffer[column++] = SATURATE(value); } else { // Dequantize and store the first value in the current row value = quantization * entry->value0; buffer[column] = SATURATE(value); // Dequantize the second value value = quantization * entry->value1; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); // Reset the column to the beginning of the row column = 0; // Store the second value in the new row buffer[column++] = SATURATE(value); } } // Decode the second 4-bit chunk index = byte & FSM_INDEX_MASK; // Index into the 
lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Copy the buffer to the row if not already beyond the band if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length); // Advance to the next row rowptr += pitch; // Zero the remaining rows in the subband while (rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } // Reset the finite state machine to the root node in the Huffman tree ResetFSM(fsm); // Return indication that the band was fully decoded return true; } // Set the finite state machine to the next state in the Huffman tree UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { // Undo quantization and scaling int32_t value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan buffer[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if (column < width-1) { value = quantization * entry->value0; buffer[column++] = SATURATE(value); value = quantization * entry->value1; buffer[column++] = SATURATE(value); } else { value = quantization * entry->value0; buffer[column] = SATURATE(value); value = quantization * entry->value1; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); // Reset the column to the beginning of the row column = 0; buffer[column++] = SATURATE(value); } } } } #endif #if 0 //dan20041030 not used // Decode a subband using FSM, combine the two results decoded from one byte bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization) { int index, skip; uint8_t 
byte; FSMENTRY *entry1, *entry2; PIXEL *rowptr = image; int row = 0, column = 0; int32_t value,bytes_row_size = width*sizeof(PIXEL); PIXEL *maxptr = rowptr + height*pitch; // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); ZeroHighPassRow(rowptr, width); // Double check that the bitstream buffer is empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream //byte = GetBits(stream, BITSTREAM_WORD_SIZE); #if 0 byte = GetByte(stream); if (stream->error != BITSTREAM_ERROR_OKAY) { stream->error = VLC_ERROR_NOTFOUND; return false; } #else // Inline of the third case of GetByte uint8_t *lpCurrentWord = stream->lpCurrentWord; // Get the next byte from the bitstream byte = (uint32_t )(*(lpCurrentWord++)); // Update the state of the bitstream stream->lpCurrentWord = lpCurrentWord; // Check that the high bits are zero assert((byte & ~BITMASK(8)) == 0); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; entry1 = GetFSMTableEntry(fsm, index); UpdateFSM(fsm, entry1->next_state); // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); entry2 = GetFSMTableEntry(fsm, index); UpdateFSM(fsm, entry2->next_state); // Return when the subband is completely decoded if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) { ResetFSM(fsm); return true; } // If no magnitude value is decoded at the first step if (entry1->value0 == 0) { // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry1->pre_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else if(entry2->value1 == 0) { // Skip to the non-zero position column += entry1->pre_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Undo quantization and scaling value = quantization * entry2->value0; // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If two magnitudes are decoded at the second step else { column += entry1->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry2->value0; rowptr[column++] = SATURATE(value); value = quantization * entry2->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry2->value0; rowptr[column] = SATURATE(value); value = quantization * entry2->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; rowptr[column++] = SATURATE(value); } } } // If only one magnitude is decoded at the first step else if(entry1->value1 == 0) { // Undo quantization and scaling value = quantization * entry1->value0; column += entry1->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry1->post_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else if (entry2->value1 == 0) { // Undo quantization and scaling value = quantization * entry2->value0; column += entry1->post_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If two magnitudes are decoded at the second step else { column += entry1->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry2->value0; rowptr[column++] = SATURATE(value); value = quantization * entry2->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry2->value0; rowptr[column] = SATURATE(value); value = quantization * entry2->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; rowptr[column++] = SATURATE(value); } } } // If two magnitudes are decoded at the first step else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry1->value0; rowptr[column++] = SATURATE(value); value = quantization * entry1->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry1->value0; rowptr[column] = SATURATE(value); value = quantization * entry1->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; 
rowptr[column++] = SATURATE(value); } // If two magnitudes are decoded at the first step // then at most one more magnitude can be decoded at the second step assert(entry2->value1 == 0); // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry2->pre_skip; // entry2->pre_skip <=4 must be true // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else { column += entry2->pre_skip; // must be a small zero run // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if (rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Undo quantization and scaling value = quantization * entry2->value0; // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if (rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } } } } #endif #if 0 //dan20041030 not used // Decode a subband using FSM. 
One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; int column = 0; int32_t value; PIXEL8S *maxptr; int length = width * sizeof(PIXEL8S); //ROI roi = {width, 1}; // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL8S); // Compute the address of the row after the last row in the band maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes length = ALIGN16(length); ZeroHighPassRow((PIXEL *)rowptr, length); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there is only one decoded magnitude value else if(entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE8S(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = entry->value0; rowptr[column++] = SATURATE8S(value); value = entry->value1; rowptr[column++] = SATURATE8S(value); } else { value = entry->value0; rowptr[column] = SATURATE8S(value); value = entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); column = 0; rowptr[column++] = SATURATE8S(value); } } // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += 
pitch; while(rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE8S(value); column += entry->post_skip; // Did the scan go beyond the end of the row? 
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if (rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}
		}
		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if (column < width-1)
			{
				value = entry->value0;
				rowptr[column++] = SATURATE8S(value);
				value = entry->value1;
				rowptr[column++] = SATURATE8S(value);
			}
			else
			{
				// The two values straddle a row boundary: store the first
				// at the end of this row, the second at the start of the next
				value = entry->value0;
				rowptr[column] = SATURATE8S(value);
				value = entry->value1;
				rowptr += pitch;
				if (rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
				column = 0;
				rowptr[column++] = SATURATE8S(value);
			}
		}
	}
}
#endif

// same as DecodeBandFSM8sNoGap but output to 16bit data
//
// Two-pass FSM (Huffman run-length) decode of one highpass subband.
// The first pass writes decoded magnitudes directly into the 16-bit
// coefficients; after re-aligning the bitstream the second pass ORs a
// second set of decoded magnitudes, shifted left by 8, into the same
// coefficients (i.e. it fills the high byte).
// NOTE(review): the 'quant' parameter is not used anywhere in this
// function body — presumably dequantization happens downstream; confirm.
// NOTE(review): rowptr is declared PIXEL* but assigned/compared against
// PIXEL16S* values — this assumes PIXEL and PIXEL16S have the same size;
// verify against the type definitions.
// Returns true when the band-end trailer is decoded (or, when
// ERROR_TOLERANT, after recovering by skipping the subband).
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
								int width, int height, int pitch, int quant)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL *rowptr = (PIXEL *)image;
	PIXEL16S *bandendptr;
	int value;

#if ERROR_TOLERANT
	// Remember the bitstream position so the band can be skipped on error
	uint8_t *startCurrentWord = stream->lpCurrentWord;
	int32_t startWordsUsed = stream->nWordsUsed;
#endif

#if _FSMBUFFER
	__declspec(align(32)) FSMENTRY buffer;
#endif

	if (image == NULL) {
		return false;
	}

	// Reset the decoder
	ResetFSM(fsm);

	pitch /= sizeof(PIXEL16S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));

	// This Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

#if 0	// test for errors.
	{
		if ((rand() % 10) == 1)
			stream->lpCurrentWord[rand()%50] ^= 1;
	}
#endif

	// First pass: decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
	while ((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
	for (;;)
#endif
	{
		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if (stream->nWordsUsed) {
			byte = GetFastByte(stream);
		}
		else {
			break;
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			goto SecondPass;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs (pre-skip packed in the low 12 bits)
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance (post-skip packed in the high 4 bits)
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			goto SecondPass;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

SecondPass:
	// Rewind to the start of the band and re-align the bitstream
	rowptr = (PIXEL16S *)image;
	AlignBits(stream);
	AlignBitsTag(stream);
	stream->lpCurrentWord += 4;
	stream->nWordsUsed -= 4;

	// Second pass: decode runs and magnitude values until the entire band is decoded,
	// ORing each magnitude into the high byte of the coefficient written by pass one
#if ERROR_TOLERANT
	while ((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
	for (;;)
#endif
	{
		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if (stream->nWordsUsed) {
			byte = GetFastByte(stream);
		}
		else {
			break;
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude (high byte)
		value = entry->value0;
		rowptr[0] |= value << 8;

		// Write down the second decoded magnitude (high byte)
		value = entry->value1;
		rowptr[1] |= value << 8;

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude (high byte)
		value = entry->value0;
		rowptr[0] |= value << 8;

		// Write down the second decoded magnitude (high byte)
		value = entry->value1;
		rowptr[1] |= value << 8;

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

#if ERROR_TOLERANT
	// The loop fell through without seeing the band-end trailer:
	// recover by resetting the decoder and skipping the subband

	// Reset the decoder
	ResetFSM(fsm);

	// Backup the bitstream to the beginning of the band
	stream->lpCurrentWord = startCurrentWord;
	stream->nWordsUsed = startWordsUsed;

#if 0
	AlignBitsTag(stream);
	// Read the debugging marker
	{
		TAGVALUE segment;
		do {
			segment = GetTagValue(stream);
		} while (segment.tuple.tag != CODEC_TAG_BAND_TRAILER);

		stream->lpCurrentWord -= 4;
		stream->nWordsUsed += 4;
	}
#else
	SkipSubband(stream);
#endif
#endif

	return true;
}

// Same as DecodeBandFSM8sNoGap but output to 16bit data
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
						   int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
						   int width, int height, int pitch)
#endif
{
	int index, byte;
	FSMENTRY *entry;
	FSMENTRYFAST *entryfast;
	PIXEL16S *rowptr = image;
	PIXEL16S *bandendptr;
	PIXEL16S *fastendptr;
	int32_t value;
	uint8_t *startCurrentWord = stream->lpCurrentWord;
	uint8_t *CurrentWord = stream->lpCurrentWord;
	int32_t startWordsUsed = stream->nWordsUsed;
	ptrdiff_t offset;

#if _FSMBUFFER
	__declspec(align(32)) FSMENTRY buffer;
#endif

#if (0 && DEBUG)
	DebugOutputBitstreamPosition(stream);
	DebugOutputBitstreamBytes(stream, 16);
#endif

	// Reset the decoder
	ResetFSM(fsm);

#if (0 && DEBUG)
	DebugOutputFSM(fsm);
#endif

	pitch /= sizeof(PIXEL16S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
	//memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));

	// This Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

#if 0	// test for errors.
{ if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1; } #endif fastendptr = bandendptr; fastendptr -= 500; // Decode runs and magnitude values until the entire band is decoded while(rowptr < fastendptr) { // Read a byte from the bitstream byte = *CurrentWord++; // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // Set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0xfff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the appropriate distance rowptr = &rowptr[entryfast->pre_post_skip >> 12]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0xfff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip >> 12]; } offset = CurrentWord - startCurrentWord; stream->lpCurrentWord += offset; stream->nWordsUsed -= (int)offset; // Decode runs and magnitude values until the entire band is decoded #if ERROR_TOLERANT while(bandendptr >= rowptr) #else for (;;) #endif { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif #if (0 && DEBUG) PrintBitstreamPosition(stream, logfile); #endif // Read a byte from the bitstream #if ERROR_TOLERANT if(stream->nWordsUsed) { byte = 
GetFastByte(stream); } else { break; } #else byte = GetFastByte(stream); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude if ((value = entry->value0)) { rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { rowptr[1] = value;//SATURATE(value); } // Skip the appropriate distance rowptr = &rowptr[entry->pre_post_skip >> 12]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude if ((value = entry->value0)) { rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { rowptr[1] = value;//SATURATE(value); } // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip >> 12]; } #if ERROR_TOLERANT // 
Reset the decoder ResetFSM(fsm); // Backup the bitstream to the beginning of the band stream->lpCurrentWord = startCurrentWord; stream->nWordsUsed = startWordsUsed; #if 0 AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant) { int index, byte; FSMENTRY *entry; PIXEL16S *rowptr = image; PIXEL16S *bandendptr; PIXEL16S *fastendptr; int32_t value; uint8_t *startCurrentWord = stream->lpCurrentWord; uint8_t *CurrentWord = stream->lpCurrentWord; int32_t startWordsUsed = stream->nWordsUsed; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif // Reset the decoder ResetFSM(fsm); //This is been called with non-prequantized FSM if(quant>1) level /= quant; pitch /= sizeof(PIXEL16S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S)); // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; #if 0 // test for errors. 
// --- continuation of DecodeBandFSM16sNoGapWithPeaks (declared above this chunk) ---
	{
		if((rand() % 10) == 1)
			stream->lpCurrentWord[rand()%50] ^= 1;
	}
#endif

	// Fast loop keeps a 1000-coefficient safety margin before the band end
	fastendptr = bandendptr;
	fastendptr -= 1000;

	// Decode runs and magnitude values until the entire band is decoded
	while(rowptr < fastendptr)
	{
		// Read a byte from the bitstream
		byte = *CurrentWord++;

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude,
		// substituting a peak-table value when it exceeds the threshold
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

	// Account for the bytes the fast loop consumed directly from the buffer
	stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
	stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));

	// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
	while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
	for (;;)
#endif
	{
#if (0 && DEBUG)
		if (!(rowptr < bandendptr)) {
			return true;
		}
#endif

		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if(stream->nWordsUsed)
		{
			byte = GetFastByte(stream);
		}
		else
		{
			break;
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

#if ERROR_TOLERANT
	// Corrupt band: reset and resynchronize past the subband.
	// Reset the decoder
	ResetFSM(fsm);

	// Backup the bitstream to the beginning of the band
	stream->lpCurrentWord = startCurrentWord;
	stream->nWordsUsed = startWordsUsed;

#if 0
// --- error-tolerant recovery tail of DecodeBandFSM16sNoGapWithPeaks (declared above this chunk) ---
	AlignBitsTag(stream);
	// Read the debugging marker
	{
		TAGVALUE segment;
		do {
			segment = GetTagValue(stream);
		} while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
		stream->lpCurrentWord -= 4;
		stream->nWordsUsed += 4;
	}
#else
	SkipSubband(stream);
#endif
#endif

	return true;
}

// This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as
// zero runs. Therefore decoded magnitude values can be written down without the need to check
// if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM
// can be significantly reduced.

// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps

// Original version that does not use a separate buffer for decoding
#if !_INDIVIDUAL_ENTRY

#if 0 //dan20041030 not used
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL8S *rowptr = image;
	PIXEL8S *bandendptr;
	int32_t value;

#if _FSMBUFFER
	__declspec(align(32)) FSMENTRY buffer;
#endif

	pitch /= sizeof(PIXEL8S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height);

	// This version of Huffman decoder assumes that one byte
	// is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

	// Decode runs and magnitude values until the entire band is decoded
	//while (rowptr < bandendptr)
	for (;;)
	{
#if (0 && DEBUG)
		if (!(rowptr < bandendptr)) {
			return true;
		}
#endif

		// Check that the decoder has not overrun the output array
		//assert(rowptr < bandendptr);

		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

#if 1
		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}
#endif

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->post_skip];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

#if 1
		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}
#endif

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->post_skip];
	}
}
#endif

#elif _SINGLE_FSM_TABLE

// Variant that indexes one flat FSM table (firstentry) instead of calling
// GetFSMTableEntry().
// NOTE(review): bandendptr is declared but never initialized in this
// variant, so the asserts below compare against an indeterminate pointer;
// width/height are likewise unused here -- confirm intent.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte, i;
	FSMENTRY *entry,*firstentry = fsm->table->firstentry;
	PIXEL8S *rowptr = image;
	PIXEL8S *bandendptr;
	int32_t value;

	pitch /= sizeof(PIXEL8S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height);

	// The Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Decode runs and magnitude values until the entire band is decoded
	for (;;)
	{
		// Check that the decoder has not overrun the output array
		//assert(rowptr < bandendptr);

		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
		entry = firstentry+i; //DAN

		// Return if the subband is decoded completely
		if(entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSMIndividual(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSMIndividual(fsm, (entry->next_state));

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->post_skip];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
		entry = firstentry+i; //DAN

		// Return if the subband is decoded completely
		if(entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSMIndividual(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSMIndividual(fsm, (entry->next_state));

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->post_skip];
	}
}

#else

// Variant using per-state individual entry lookup; a NULL entry signals the
// band-end trailer.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL8S *rowptr = image;
	PIXEL8S *bandendptr;
	int32_t value;

#if 1
	__declspec(align(4)) FSMENTRY buffer;
#endif

	pitch /= sizeof(PIXEL8S);

	// zero out the entire subband
	ZeroHighPassRow((PIXEL
*)rowptr, pitch*height);

	// The Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

	// Decode runs and magnitude values until the entire band is decoded
	for (;;)
	{
#if (0 && DEBUG)
		if (!(rowptr < bandendptr)) {
			return true;
		}
#endif

		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntryIndividual(fsm, index);

		// Return if the subband is decoded completely
		// (a NULL entry marks the band-end trailer in this variant)
		if(entry == NULL)
		{
			assert(rowptr <= bandendptr);
			ResetFSMIndividual(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSMIndividual(fsm, (entry->next_state));

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->post_skip];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntryIndividual(fsm, index);

		// Return if the subband is decoded completely
		if (entry == NULL)
		{
			assert(rowptr <= bandendptr);
			ResetFSMIndividual(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSMIndividual(fsm, (entry->next_state));

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_skip];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->post_skip];
	}
}
#endif

// Decode the highpass band coefficients but do not write them out - used in SIF mode
// Runs the FSM over the subband purely to advance the bitstream/decoder
// state; no pixels are written (image/width/height/pitch are unused beyond
// the pitch normalization).
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte;
	FSMENTRY *entry;

	pitch /= sizeof(PIXEL8S);

	// The Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Decode runs and magnitude values until the entire band is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);
	}
}

#if _TIMING
extern TIMER tk_fastruns;
#endif

#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	FILE *logfile = decoder->logfile;
	int result;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

	// All rows are treated as one long row that covers the entire band
	int size = fsm->table.num_states;
	PIXEL *rowptr;
	int row = 0;
	int pitch;
	int pixel_type = wavelet->pixel_type[band_index];

	decoder->codec.active_codebook = 0; // reset CODEC state

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
assert(fsm != NULL); if(fsm == NULL) return false; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is intended for 8-bit pixels assert(pixel_type == PIXEL_TYPE_8S); START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch8s; // Use the 8-bit pitch //pitch = wavelet->pitch; // The finite state machine does not support a marker at the end of rows #if RUNS_ROWEND_MARKER assert(0); #endif // Get one byte from the bitstream and decode 4 bits at a time result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch); assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index); DumpWaveletRow(wavelet, band_index, 0, logfile); } #endif end: STOP(tk_fastruns); return true; } #endif #if _DEQUANTIZE_IN_FSM void ReQuantFSM(FSM *fsm, int quant) { int count = 0; int i, j; short *restore = &fsm->restoreFSM[0]; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY *entry = fsm->table.entries[i]; for (j = 0; j < (1 << FSM_INDEX_SIZE); j++) { entry[j].value0 = restore[count++]; entry[j].value1 = restore[count++]; } } #else for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++) { FSMENTRY *entry = fsm_table.entries_ind[i]; if(entry) { entry->value0 = restore[count++]; entry->value1 = restore[count++]; } } #endif } void DeQuantFSM(FSM *fsm, int quant) { int i, j; if(fsm->LastQuant > 1 && fsm->LastQuant != quant) { ReQuantFSM(fsm, fsm->LastQuant); } else if(fsm->LastQuant == quant) { return; } if(fsm->InitizedRestore == 0) { short *restore = &fsm->restoreFSM[0]; int count = 0; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY 
*entry = fsm->table.entries[i];
			for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
			{
				restore[count++] = entry[j].value0;
				restore[count++] = entry[j].value1;
			}
		}
#else
		for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
		{
			FSMENTRY *entry = fsm->table.entries_ind[i];
			if(entry)
			{
				restore[count++] = entry->value0;
				restore[count++] = entry->value1;
			}
		}
#endif
		fsm->InitizedRestore = 1;
	}

	// Scale the magnitudes in place
#if !_INDIVIDUAL_ENTRY
	for (i = 0; i < fsm->table.num_states; i++)
	{
		FSMENTRY *entry = fsm->table.entries[i];
		for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
		{
			if(entry[j].value0 < 0x7ff0) // band end trailer
				entry[j].value0 *= quant;
			entry[j].value1 *= quant;
		}
	}
#else
	for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
	{
		FSMENTRY *entry = fsm->table.entries_ind[i];
		if(entry)
		{
			if(entry->value0 < 0x7ff0) // band end trailer etc
				entry->value0 *= quant;
			entry->value1 *= quant;
		}
	}
#endif

	fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM

// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
//
// Decodes one 16-bit highpass subband.  Dequantizes the FSM table, then
// decodes (with or without the peak table), applies optional horizontal
// difference decoding, and marks the band valid.  With _THREADED and an
// available worker pool, the work is queued instead and the bitstream is
// advanced past the subband (via the optional chunk-size tag or by scanning
// for the band-end marker 0x00003800).
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height, int threading)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result = true;
	int quant = wavelet->quantization[band_index];
	int active_codebook = decoder->codec.active_codebook;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[active_codebook];
	int size;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;
	CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int num_subbands = codec->num_subbands;
	//int pixel_type = wavelet->pixel_type[band_index];
	int difference_coding = decoder->codec.difference_coding;
	//int localquant = 1;
	int peaklevel = 0;
	//int peaksize = 0;
	PIXEL *peakbase = NULL;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n", subband, decoder->codec.active_codebook, difference_coding);
	}
#endif

	decoder->codec.active_codebook = 0; // reset CODEC state
	decoder->codec.difference_coding = 0; //reset state for next subband

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
	assert(fsm != NULL);
	if(fsm == NULL)
		return false;

	// All rows are treated as one long row that covers the entire band
	size = fsm->table.num_states;
	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check if the band is intended for 8-bit pixels
	assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

	START(tk_fastruns);

	rowptr = (PIXEL *)wavelet->band[band_index];
	//pitch = wavelet->pitch8s; // Use the 8-bit pitch
	pitch = wavelet->pitch;

	peaklevel = codec->peak_table.level;
	peakbase = codec->peak_table.base;

#if _THREADED
	threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
	if(threading)
	{
		decoder->entropy_worker_new.threads_used = 1;
		{
			//int start = stream->nWordsUsed;
			int end;
			struct entropy_data_new *data;
			int next_queue_num = decoder->entropy_worker_new.next_queue_num++;

			// Package the decode request for the entropy worker thread
			data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
			memcpy(&data->stream,stream, sizeof(BITSTREAM));
			data->rowptr = rowptr;
			data->width = width;
			data->height = height;
			data->pitch = pitch;
			data->peaks = peakbase;
			data->level = peaklevel;
			data->quant = quant;
			data->wavelet = wavelet;
			data->band_index = band_index;
			data->active_codebook = active_codebook;
			data->difference_coding = difference_coding;

			// Start only a particular threadid
			if(next_queue_num == 0)
			{
				ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
				ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
			}
			else
			{
				// Set the work count to the number of rows to process
				ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
			}

			// Advance the caller's bitstream past this subband: use the
			// optional subband-size tag when present, otherwise scan for
			// the band-end marker.
			{
				unsigned short tag = *(stream->lpCurrentWord-8) << 8;
				if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
				{
					int chunksize;
					int value = *(stream->lpCurrentWord-6) << 8;
					value |= *(stream->lpCurrentWord-5);
					tag |= *(stream->lpCurrentWord-7);
					tag = NEG(tag);

					chunksize = value;
					chunksize &= 0xffff;
					chunksize += ((tag&0xff)<<16);
					chunksize *= 4;
					chunksize -= 8;

					{
						uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
						ptr += (chunksize>>2);
						if(*ptr != 0x00003800) // bandend
						{
							goto continuesearch;
						}
					}

					stream->lpCurrentWord += chunksize;
					stream->nWordsUsed -= chunksize;
					end = stream->nWordsUsed;
				}
				else
				{
continuesearch:
					while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
					{
						stream->lpCurrentWord += 4;
						stream->nWordsUsed -= 4;
					}
					end = stream->nWordsUsed;
				}
			}
		}
	}
	else
#endif // _THREADED
	{
		// Synchronous path: dequantize the FSM table, then decode the band
		DeQuantFSM(fsm, quant);

		if (peaklevel)
		{
			result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
		}
		else
		{
#if _DEBUG
			result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
			result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
		}

		// Undo horizontal difference coding (running sum along each row)
		if(difference_coding)
		{
			int x,y;
			PIXEL *line = rowptr;
			for(y=0;y<height;y++)
			{
				for(x=1;x<width;x++)
				{
					line[x] += line[x-1];
				}
				line += pitch/2;
			}
		}

		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
		}
	}

	assert(result == true);
	if (result != true) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

//end:
	STOP(tk_fastruns);

	return true;
}

// Decode and discard one 8-bit subband: runs the FSM over the bitstream via
// SkipBandFSM without writing coefficients.  Returns false (and sets
// decoder->error) on failure.
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

	// All rows are treated as one long row that covers the entire band
	int size = fsm->table.num_states;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;
	//int pixel_type = wavelet->pixel_type[band_index];

	decoder->codec.active_codebook = 0; // reset CODEC state

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
	assert(fsm != NULL);
	if(fsm == NULL)
		return false;

	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check if the band is 8bit/pixel
	assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);

	START(tk_fastruns);

	rowptr = (PIXEL *)wavelet->band[band_index];
	pitch = wavelet->pitch8s; // Use the 8-bit pitch

	// The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
	assert(0);
#endif

#if 1
	// Get one byte from the bitstream and decode 4 bits at a time
	result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
	assert(result == true);
	if (result != true) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}
#endif

#if (0 && DEBUG && _WIN32)
	_CrtCheckMemory();
#endif

#if (0 && DEBUG)
	if (logfile)
		DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

//end:
	STOP(tk_fastruns);

	return true;
}

// The third version is also based on the finite state machine decoder with
// gaps between rows encoded as zero runs, but dequantization is performed as
// the highpass values are read from the bitstream and placed into a row buffer.
// The highpass values are not written into the wavelet highpass band.

// Eventually this routine will be merged into the routine DecodeTemporalBand8s
// since this routine contains code specific to the inverse temporal transform
// and DecodeTemporalBand8s has become a shell.
// NOTE(review): this routine is compiled out (#if 0).  It decodes one 8-bit
// run-length encoded band with the FSM decoder, dequantizing coefficients into
// a single row buffer and applying the inverse temporal transform row by row
// (the highpass values are never written back into the wavelet band).
#if 0
bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index,
						 int width, int height, IMAGE *frame0, IMAGE *frame1)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	FILE *logfile = decoder->logfile;
	int result;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm;

	// All rows are treated as one long row that covers the entire band
	int size = fsm->table.num_states;

	PIXEL *lowpass = wavelet->band[0];
	int lowpass_pitch = wavelet->pitch;

	//PIXEL8S *rowptr;
	int row = 0;
	int pitch;
	int row_width;		// Width of the encoded row of highpass coefficients

	// Destination rows for the inverse temporal transform (even/odd fields)
	PIXEL *even = frame0->band[0];
	PIXEL *odd = frame1->band[0];
	int even_pitch = frame0->pitch;
	int odd_pitch = frame1->pitch;

	int pixel_type = wavelet->pixel_type[band_index];
	int quantization = wavelet->quantization[band_index];

	PIXEL *buffer;
	size_t buffer_size;

	int index, byte;
	FSMENTRY *entry;
	int column = 0;
	int32_t value;

	int buffer_row_size;
	PIXEL *highpass;

	// Check that the wavelet into which the band will be decoded is valid
	assert(wavelet != NULL);
	if (wavelet == NULL) return false;

	// Check that the finite state machine is valid
	assert(fsm != NULL);
	if (fsm == NULL) return false;

	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check that the band was encoded using 8-bit signed coefficients
	assert(pixel_type == PIXEL_TYPE_8S);

	pitch = wavelet->pitch8s;	// Use the pitch for 8-bit packed rows

	// Get the buffer for storing one row of dequantized highpass coefficients
	buffer = (PIXEL *)decoder->buffer;
	buffer_size = decoder->buffer_size;

	// The finite state machine does not support a marker at the end of each row
	assert(RUNS_ROWEND_MARKER == 0);

	/***** Start of code included from DecodeBandFSM8s() *****/

	// Check that one byte can be processes as two 4-bit nibbles
	assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE));

	// Check that the bitstream buffer is empty
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Convert the pitch to units of pixels
	pitch /= sizeof(PIXEL8S);
	buffer_row_size = pitch * sizeof(PIXEL);

	lowpass_pitch /= sizeof(PIXEL);
	even_pitch /= sizeof(PIXEL);
	odd_pitch /= sizeof(PIXEL);

	// Compute the address of the row after the last row in the band
	//maxptr = rowptr + height * pitch;

	// Round up the row length (in bytes) to a multiple of 16 bytes
	//row_size = ALIGN16(row_size);

	// Check that the buffer is large enough to hold one row
	//assert(buffer_size >= row_size);
	assert(buffer_size >= buffer_row_size);

	// Use the buffer for the row or highpass coefficients
	highpass = buffer;

#if 1
	// The row spans the allocated width (pitch) of the band in no gap mode
	row_width = pitch;
#else
	// For debugging
	row_width = wavelet->encoded_pitch/sizeof(PIXEL8S);
#endif

	// Clear the highpass buffer before decoding the non-zero coefficients
	ZeroHighPassRow(highpass, buffer_row_size);

	// Decode zero runs and magnitude values (with appended sign bit)
	// until the marker for the band end trailer has been decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		/***** Decode the first 4-bit nibble *****/

		// Decode the first 4-bit nibble
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return when the entire band is decoded
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Dequantize the highpass coefficients
			//DequantizeBandRow(rowptr, width, quantization, highpass);

			// Apply the inverse temporal transform to the current row
			InvertTemporalRow16s(lowpass, highpass, even, odd, width);

			// Advance to the next lowpass input row
			lowpass += lowpass_pitch;

			// Advance to the next even and odd output rows
			even += even_pitch;
			odd += odd_pitch;

			// Process the rest of the subband
			// (remaining rows have all-zero highpass coefficients)
			ZeroHighPassRow(highpass, buffer_row_size);
			while (++row < height)
			{
				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;
			}

			ResetFSM(fsm);

			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}
		}

		// If there is only one decoded magnitude value
		else if (entry->value1 == 0)
		{
			value = entry->value0;
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			assert(0 <= column && column < row_width);

			// Dequantize the value and store it in the highpass row buffer
			highpass[column] = quantization * value;

			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}
		}

		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < row_width);

			if (column < (row_width - 1))
			{
				// Store both values in the current row
				highpass[column++] = quantization * entry->value0;
				highpass[column++] = quantization * entry->value1;
			}
			else
			{
				// The two values straddle a row boundary: store the first value
				// at the end of this row and the second at the start of the next
				value = entry->value0;
				highpass[column] = quantization * value;
				value = entry->value1;

				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);

				column = 0;
				highpass[column++] = quantization * value;
			}
		}

		/***** Decode the second 4-bit nibble *****/

		// Decode the second 4-bit nibble
		index = byte & FSM_INDEX_MASK;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Dequantize the highpass coefficients
			//DequantizeBandRow(rowptr, width, quantization, highpass);

			// Apply the inverse temporal transform to the current row
			InvertTemporalRow16s(lowpass, highpass, even, odd, width);

			// Advance to the next lowpass input row
			lowpass += lowpass_pitch;

			// Advance to the next even and odd output rows
			even += even_pitch;
			odd += odd_pitch;

			// Process the rest of the subband
			ZeroHighPassRow(highpass, buffer_row_size);
			while (++row < height)
			{
				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;
			}

			ResetFSM(fsm);

			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}
		}

		// If there is only one decoded magnitude value
		else if (entry->value1 == 0)
		{
			value = entry->value0;
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			//assert(index < width);
			assert(0 <= column && column < row_width);
			highpass[column] = quantization * value;

			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= row_width)
			{
				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Compute the starting column for the next row
				column -= row_width;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);
			}
		}

		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < row_width);

			if (column < (row_width - 1))
			{
				// Store both highpass values in the current row
				highpass[column++] = quantization * entry->value0;
				highpass[column++] = quantization * entry->value1;
			}
			else
			{
				// The two values straddle a row boundary (see first nibble case)
				highpass[column] = quantization * entry->value0;
				value = entry->value1;

				// Dequantize the highpass coefficients
				//DequantizeBandRow(rowptr, width, quantization, highpass);

				// Apply the inverse temporal transform to the current row
				InvertTemporalRow16s(lowpass, highpass, even, odd, width);

				// Advance to the next lowpass input row
				lowpass += lowpass_pitch;

				// Advance to the next even and odd output rows
				even += even_pitch;
				odd += odd_pitch;

				// Advance to the next row
				row++;

				// Clear the highpass buffer before decoding the non-zero coefficients
				ZeroHighPassRow(highpass, buffer_row_size);

				column = 0;
				highpass[column++] = quantization * value;
			}
		}
	}

	/***** End of the code included from DecodeBandFSM8s() *****/

#if 0
	assert(result == true);
	if (result != true) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}
#endif

#if (0 && DEBUG && _WIN32)
	_CrtCheckMemory();
#endif

#if (0 && DEBUG)
	if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

#if 0
end:
	return true;
#endif
}
#endif

/***** End of the code for the
finite state machine decoder *****/

#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.

// Apply the inverse horizontal-temporal transform and pack the output into a buffer
// (YUYV or UYVY, at 8- or 10-bit input precision; two interlaced output rows
// are produced per transform row, hence the field_pitch of twice output_pitch).
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
								uint8_t *output, int output_pitch, FRAME_INFO *frame,
								const SCRATCH *scratch, int chroma_offset, int precision)
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);

	int field_pitch = 2 * output_pitch;

	int output_width;

	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
#if DEBUG
	assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		//TODO: Need to recode the buffer allocations using the scratch space API

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Scratch line for the horizontal inverse filters, placed after the
		// per-channel temporal row buffers in the scratch section
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			//DAN20051004 -- possible reversiblity issue
			//InvertHorizontalRow8sBuffered
			//----------------------- Maybe bad
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
												  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
												  temporal_highpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;

		if (precision == CODEC_PRECISION_10BIT)
		{
			// Invert the temporal bands from all channels and pack output pixels
			switch (frame->format)
			{
			// Need to reduce the resolution from 10 bits to 8 bits during the inverse
			case DECODED_FORMAT_YUYV:
				InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
												 output, output_pitch, output_width, frame_width, chroma_offset);
				break;

			case DECODED_FORMAT_UYVY:
				InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
												  output, output_pitch, output_width, frame_width, chroma_offset);
				break;

			default:
				assert(0);
				break;
			}
		}
		else	// Older code for 8-bit precision
		{
			int format;

			assert(precision == CODEC_PRECISION_8BIT);

			switch (frame->format)
			{
			case DECODED_FORMAT_YUYV:
				format = COLOR_FORMAT_YUYV;
				break;
			case DECODED_FORMAT_UYVY:
				format = COLOR_FORMAT_UYVY;
				break;
			}

			// Invert the temporal bands from all channels and pack output pixels
			InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
										output, output_pitch, output_width, frame_width,
										chroma_offset, format);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}
#endif

#if _INTERLACED_WORKER_THREADS

// Worker-thread variant of the inverse frame transform: each thread pulls row
// indices from a shared counter guarded by a semaphore and critical section.
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
									   uint8_t *output, int output_pitch, FRAME_INFO *frame,
									   int chroma_offset, int precision)
{
	FILE *logfile = decoder->logfile;
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Pointers to the rows in the temporal
	// wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	uint8_t *output_row_ptr = output;

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);

	int field_pitch = 2 * output_pitch;

	int output_width;

	int channel;
	int row;

	HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
	int return_value;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Divide the buffer space between the four threads
	buffer_size /= 4;
	buffer += buffer_size * thread_index;

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		//TODO: Need to recode the buffer allocations using the scratch space API

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
	}
#endif

	// NOTE(review): the block below is an abandoned static top/bottom work split
	// (kept commented out); the live code instead hands out rows dynamically
	// through the row semaphore and the shared current_row counter.
/*
	if (thread_index == 0)
	{
		row = 0;
		row_step = 1;
	}
	else if (thread_index == 1)
	{
		row = half_height - 1;
		row_step = -1;

		// Move to the bottom of the transform and process moving up
		for (channel = 0; channel < num_channels; channel++)
		{
			int offset = horizontal_pitch[channel] * (half_height - 1);

			horizontal_lowlow[channel] += offset;
			horizontal_lowhigh[channel] += offset;
			horizontal_highlow[channel] += offset;
			horizontal_highhigh[channel] += offset;

			horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
			horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
		}

		output += field_pitch * (half_height - 1);
		field_pitch = NEG(field_pitch);
	}
	else
	{
		assert(0);	// what about middle threads?
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
			thread_index, row, row_step, field_pitch);
	}
#endif
*/

	// Loop until all of the rows have been processed
	for (;;)
	{
		// Wait for one row from each channel to invert the transform
		return_value = WaitForSingleObject(row_semaphore, 0);

		// Determine the index of this worker thread
		if (return_value == WAIT_OBJECT_0)
		{
			// Claim the next unprocessed row under the critical section
			if(decoder->interlaced_worker.lock_init)
			{
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}
			row = decoder->interlaced_worker.current_row++;
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			output_row_ptr = output;
			output_row_ptr += row * 2 * output_pitch;

			// Reposition the band row pointers at the claimed row
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				IMAGE *wavelet = transform[channel]->wavelet[frame_index];

				horizontal_lowlow[channel] = wavelet->band[LL_BAND];
				horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
				horizontal_highlow[channel] = wavelet->band[HL_BAND];
				horizontal_highhigh[channel] = wavelet->band[HH_BAND];

				horizontal_lowlow[channel] += pitch*row;
				horizontal_lowhigh[channel] += pitch*row;
				horizontal_highlow[channel] += pitch*row;
				horizontal_highhigh[channel] += pitch*row;
			}
		}

		if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
		{
			//PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
			PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);

		//	assert(0 <= row && row < half_height);

#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
					row, thread_index, output_row_ptr);
			}
#endif
			// Invert the horizontal transform applied to the temporal bands in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				//int pitch8s = horizontal_pitch8s[channel];

#if (0 && DEBUG)
				// Invert the horizontal transform by duplicating the lowpass pixels
				InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
												 (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												 temporal_lowpass[channel], horizontal_width[channel],
												 (PIXEL *)line_buffer);
#else
				// Invert the horizontal transform applied to the temporal lowpass row
				InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
													  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
													  temporal_lowpass[channel], horizontal_width[channel],
													  (PIXEL *)line_buffer);
#endif
				// Invert the horizontal transform applied to the temporal highpass row
				InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
											  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
											  temporal_highpass[channel], horizontal_width[channel],
											  (PIXEL *)line_buffer);

				// Advance to the next row in each horizontal band in this channel
				//horizontal_lowlow[channel] += pitch;
				//horizontal_lowhigh[channel] += pitch;
				//horizontal_highlow[channel] += pitch;
				//horizontal_highhigh[channel] += pitch;
			}

			// The output width is twice the width of the wavelet bands
			output_width = 2 * horizontal_width[0];

			// Adjust the frame width to fill to the end of each row
			//frame_width = output_pitch / 2;

			if (precision == CODEC_PRECISION_10BIT)
			{
				// Invert the temporal bands from all channels and pack output pixels
				switch (frame->format)
				{
				// Need to reduce the resolution from 10 bits to 8 bits during the inverse
				case DECODED_FORMAT_YUYV:
					InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
													 output_row_ptr, output_pitch, output_width, frame_width, chroma_offset);
					break;

				case DECODED_FORMAT_UYVY:
					InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
													  output_row_ptr, output_pitch, output_width, frame_width, chroma_offset);
					break;

				default:
					assert(0);
					break;
				}
			}
			else	// Older code for 8-bit precision
			{
				int format;

				assert(precision == CODEC_PRECISION_8BIT);

				switch (frame->format)
				{
				case DECODED_FORMAT_YUYV:
					format = COLOR_FORMAT_YUYV;
					break;
				case DECODED_FORMAT_UYVY:
					format = COLOR_FORMAT_UYVY;
					break;
				}

				// Invert the temporal bands from all channels and pack output pixels
				InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
											output_row_ptr, output_pitch, output_width, frame_width,
											chroma_offset, format);
			}

			// Advance to the next row in the input transforms
			//row += row_step;

			// Advance to the next row in the packed output image
			//output += field_pitch;
		}
		else
		{
			// No more rows to process
			break;
		}
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
	}
#endif
}
#endif //#if BUILD_PROSPECT

// Apply the inverse horizontal-temporal transform and output rows of luma and chroma
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size, int chroma_offset, int precision)
#else
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state
to allocate a new section char *buffer = scratch->free_ptr; #if DEBUG size_t buffer_size = scratch->free_size; #endif // Buffers for the rows in the temporal wavelet (reused for each channel) PIXEL *temporal_lowpass; PIXEL *temporal_highpass; int output_row_width[TRANSFORM_MAX_CHANNELS]; // Dimensions of the reconstructed frame int frame_width = frame->width; int frame_height = frame->height; int half_height = frame_height / 2; size_t temporal_row_size = frame_width * sizeof(PIXEL); int field_pitch = 2 * output_pitch; int luma_width = frame_width; int chroma_width = luma_width/2; int channel; int row; #if (1 && DEBUG_ROW16U) PIXEL16U *output_buffer; #endif // This routine should only be called to decode rows of 16-bit luma and chroma //assert(frame->format == DECODED_FORMAT_YR16); // Round up the temporal row size to an integral number of cache lines temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE); // Check that the buffer starts on a cache line boundary assert(ISALIGNED(buffer, _CACHE_LINE_SIZE)); // Check that the number of channels is reasonable assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS); // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass) // plus the buffer used by the inverse horizontal transform for its intermediate results #if DEBUG assert((2 * temporal_row_size) <= buffer_size); #endif // Allocate buffers for one row of lowpass and highpass temporal coefficients temporal_lowpass = (PIXEL *)&buffer[0]; temporal_highpass = (PIXEL *)&buffer[temporal_row_size]; #if (1 && DEBUG_ROW16U) output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size]; #endif // Initialize the arrays of row pointers into the horizontal transform bands for (channel = 0; channel < num_channels; channel++) { IMAGE *wavelet = transform[channel]->wavelet[frame_index]; #if (0 && DEBUG) int static count = 0; if (count < 20) { char label[_MAX_PATH]; int i; sprintf(label, "Frame%d-%d-", frame_index, count); 
DumpPGM(label, wavelet, NULL); for (i = 1; i < wavelet->num_bands; i++) { sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count); DumpBandPGM(label, wavelet, i, NULL); } } count++; #endif // Initialize the row pointers into the horizontal bands horizontal_lowlow[channel] = wavelet->band[LL_BAND]; horizontal_lowhigh[channel] = wavelet->band[LH_BAND]; horizontal_highlow[channel] = wavelet->band[HL_BAND]; horizontal_highhigh[channel] = wavelet->band[HH_BAND]; lowlow_quantization[channel] = wavelet->quantization[LL_BAND]; lowhigh_quantization[channel] = wavelet->quantization[LH_BAND]; highlow_quantization[channel] = wavelet->quantization[HL_BAND]; highhigh_quantization[channel] = wavelet->quantization[HH_BAND]; // Compute the pitch in units of pixels horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL); // Remember the width of the horizontal wavelet rows for this channel horizontal_width[channel] = wavelet->width; // Compute the width of each row of output pixels output_row_width[channel] = (channel == 0) ? 
luma_width : chroma_width; } // Process one row at a time from each channel for (row = 0; row < half_height; row++) { #if (1 && DEBUG_ROW16U) PIXEL16U *output_row_ptr = output_buffer; PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS]; int planar_pitch[TRANSFORM_MAX_CHANNELS]; ROI strip = {luma_width, 2}; uint8_t *yuv_output = (uint8_t *)output; uint8_t *output1 = yuv_output; uint8_t *output2 = yuv_output + output_pitch; #else PIXEL16U *output_row_ptr = output; #endif // Invert the horizontal transform applied to the temporal bands in each channel for (channel = 0; channel < num_channels; channel++) { int pitch = horizontal_pitch[channel]; if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { // Invert the horizontal transform applied to the temporal lowpass row BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel], temporal_lowpass, horizontal_width[channel]); // Invert the horizontal transform applied to the temporal highpass row BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel], temporal_highpass, horizontal_width[channel]); } else { // Invert the horizontal transform applied to the temporal lowpass row InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel], temporal_lowpass, horizontal_width[channel]); // Invert the horizontal transform applied to the temporal highpass row InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel], temporal_highpass, horizontal_width[channel]); } //***DEBUG*** //ZeroMemory(temporal_highpass, temporal_row_size); //FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50); // Advance to the next row in each horizontal band in this channel horizontal_lowlow[channel] += pitch; horizontal_lowhigh[channel] += pitch; horizontal_highlow[channel] += pitch; horizontal_highhigh[channel] += pitch; #if (1 && DEBUG_ROW16U) // Write the rows of 16-bit pixels to a temporary buffer planar_output[channel] = 
output_row_ptr; planar_pitch[channel] = output_pitch * sizeof(PIXEL); // Invert the temporal transform and output two rows of luma or chroma InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass, planar_output[channel], planar_pitch[channel], output_row_width[channel], frame_width, chroma_offset, precision); //if (channel > 0) if (0) { uint8_t *output3 = (uint8_t *)planar_output[channel]; uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel]; int output_size = output_row_width[channel] * sizeof(PIXEL); int fill_value = (128 << 8); //ZeroMemory(output3, output_size); //ZeroMemory(output4, output_size); FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value); FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value); } #else // Invert the temporal transform and output two rows of luma or chroma InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass, output_row_ptr, output_pitch, output_row_width[channel], frame_width, chroma_offset, precision); #endif // Advance the output row pointer to the next channel output_row_ptr += output_row_width[channel]; // Check the output row alignment assert(ISALIGNED16(output_row_ptr)); } // Advance to the next group of rows in the output image output += field_pitch/sizeof(PIXEL16U); } } //#endif #if _INTERLACED_WORKER_THREADS void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels, PIXEL16U *output, int output_pitch, FRAME_INFO *frame, int chroma_offset, int precision) { FILE *logfile = decoder->logfile; TRANSFORM **transform = decoder->transform; const SCRATCH *scratch = &decoder->scratch; // Pointers to the rows in the horizontal wavelet for each channel PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS]; // Horizontal wavelet band width and pitch int 
	horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Buffers for the rows in the temporal wavelet (reused for each channel)
	PIXEL *temporal_lowpass;
	PIXEL *temporal_highpass;

	int output_row_width[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);

	int field_pitch = 2 * output_pitch;

	int luma_width = frame_width;
	int chroma_width = luma_width/2;

	int channel;
	int row;

	// Semaphore that doles out rows to the worker threads
	HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
	int return_value;

#if (1 && DEBUG_ROW16U)
	PIXEL16U *output_buffer;
#endif

	// This routine should only be called to decode rows of 16-bit luma and chroma
	//assert(frame->format == DECODED_FORMAT_YR16);

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

#if 0
	if (thread_index == 1)
	{
		// Skip over the buffer space used by the other thread
		size_t buffer_usage = 2 * temporal_row_size;
		buffer += buffer_usage;
		buffer_size -= buffer_usage;
	}
#else
	// Divide the buffer space between the two threads
	// NOTE(review): the quarter split (divide by 4) leaves half of the scratch
	// space unused by two threads — intent unclear from this view; confirm.
	buffer_size /= 4;
	buffer += buffer_size * thread_index;
#endif

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
	// plus the buffer used by the inverse horizontal transform for its intermediate results
	assert((2 * temporal_row_size) <= buffer_size);

	// Allocate buffers for one row of lowpass and highpass temporal coefficients
	temporal_lowpass = (PIXEL *)&buffer[0];
	temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
	output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

	// Initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Compute the width of each row of output pixels
		output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
	}
#endif

	/*
	if (thread_index == 0) {
		row = 0;
		row_step = 1;
	}
	else if (thread_index == 1) {
		row = half_height - 1;
		row_step = -1;

		// Move to the bottom of the transform and process moving up
		for (channel = 0; channel < num_channels; channel++)
		{
			int offset = horizontal_pitch[channel] * (half_height - 1);
			horizontal_lowlow[channel] += offset;
			horizontal_lowhigh[channel] += offset;
			horizontal_highlow[channel] += offset;
			horizontal_highhigh[channel] += offset;
			horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
			//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
		}

		//output += field_pitch * (half_height - 1);
		output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
		output_pitch = NEG(output_pitch);
		field_pitch = NEG(field_pitch);
	}
	else {
		assert(0);	// middle threads
	}
	*/

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
				thread_index, row, row_step, field_pitch);
	}
#endif

	// Loop until all of the rows have been processed
	for (;;)
	{
		PIXEL16U *output_row_ptr;

		// Wait for one row from each channel to invert the transform
		return_value = WaitForSingleObject(row_semaphore, 0);

		// Determine the index of this worker thread
		if (return_value == WAIT_OBJECT_0)
		{
			// Claim the next unprocessed row (guarded by the worker lock when present)
			if (decoder->interlaced_worker.lock_init) {
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}
			row = decoder->interlaced_worker.current_row++;
			if (decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			output_row_ptr = output;
			output_row_ptr += row * output_pitch;

			// Position the band pointers on the claimed row in every channel
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				IMAGE *wavelet = transform[channel]->wavelet[frame_index];

				horizontal_lowlow[channel] = wavelet->band[LL_BAND];
				horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
				horizontal_highlow[channel] = wavelet->band[HL_BAND];
				horizontal_highhigh[channel] = wavelet->band[HH_BAND];

				horizontal_lowlow[channel] += pitch*row;
				horizontal_lowhigh[channel] += pitch*row;
				horizontal_highlow[channel] += pitch*row;
				horizontal_highhigh[channel] += pitch*row;
			}
		}

		if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
		{
			assert(0 <= row && row < half_height);

			if (decoder->frame.resolution == DECODED_RESOLUTION_FULL)
			{
				// Invert the horizontal transform applied to the temporal bands in each channel
				for (channel = 0; channel < num_channels; channel++)
				{
					int pitch = horizontal_pitch[channel];

					// Invert the horizontal transform applied to the temporal lowpass row
					InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
										   temporal_lowpass, horizontal_width[channel]);

					// Invert the horizontal transform applied to the temporal highpass row
					InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
										   temporal_highpass, horizontal_width[channel]);

					// Invert the temporal transform and output two rows of luma or chroma
					InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
												   output_row_ptr, output_pitch,
												   output_row_width[channel], frame_width,
												   chroma_offset, precision);

					// Advance the output row pointer to the next channel
					output_row_ptr += output_row_width[channel];
				}
			}
			else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
			{
				// Invert the horizontal transform applied to the temporal bands in each channel
				for (channel = 0; channel < num_channels; channel++)
				{
					int pitch = horizontal_pitch[channel];

					// Invert the horizontal transform applied to the temporal lowpass row
					BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
										   temporal_lowpass, horizontal_width[channel]);

					// Invert the horizontal transform applied to the temporal highpass row
					BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
										   temporal_highpass, horizontal_width[channel]);

					// Invert the temporal transform and output two rows of luma or chroma
					InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
												   output_row_ptr, output_pitch,
												   output_row_width[channel], frame_width,
												   chroma_offset, precision);

					// Advance the output row pointer to the next channel
					output_row_ptr += output_row_width[channel];
				}
			}
		}
		else
		{
			// No more rows to process
			break;
		}
	}

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
	}
#endif
}
#endif

#if 0
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
	// Unpack the argument struct and forward to the top-field transform routine
	struct data {
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	} *dptr;

	dptr = (struct data *)param;

	TransformInverseFrameToRow16utop(dptr->transform, dptr->frame_index, dptr->num_channels,
									 (PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
									 dptr->scratch, dptr->chroma_offset, dptr->precision);

	return 0;
}

DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
	// Unpack the argument struct and forward to the bottom-field transform routine
	struct data {
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	} *dptr;

	dptr = (struct data *)param;

	TransformInverseFrameToRow16ubottom(dptr->transform, dptr->frame_index, dptr->num_channels,
										(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
										dptr->scratch, dptr->chroma_offset, dptr->precision);

	return 0;
}
#endif

extern void fast_srand( int seed );

// Apply the inverse horizontal-temporal transform and pack the output into a buffer
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size, int chroma_offset, int precision)
#else
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO
								   *frame, const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;
#if DEBUG
	size_t yuv_row_size = frame_width * 2;
#endif

	// Intermediate YUV workspace carved out of the scratch buffer
	char *yuv_buffer;
	size_t yuv_buffer_size;

	int field_pitch = 2 * output_pitch;

	int format = frame->format;

	// RGB output formats are written bottom-up (inverted)
	bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);

	int output_width;
	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Allocate buffer space for the intermediate YUV data
	yuv_buffer = buffer + temporal_buffer_size;
	yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
	assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

	if (inverted) {
		// Start at the bottom output row and write upward using negated pitches
		output += (frame_height - 1) * output_pitch;
		output_pitch = (- output_pitch);
		field_pitch = (- field_pitch);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Scratch line used by the buffered inverse horizontal transforms
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
										  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
										  temporal_highpass[channel], horizontal_width[channel],
										  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;

//#if BUILD_PROSPECT
		if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
		{
			// Invert the temporal bands from all channels and pack as V210 output
			InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
										 output, output_pitch, output_width, frame_width,
										 yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
		}
		else
//#endif
		{
			// Invert the temporal bands from all channels and pack as 8-bit output
			InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
								   output, output_pitch, output_width, frame_width,
								   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
								   chroma_offset, precision, row);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}

// Convert one decoded image into the requested packed RGB/YUV output format
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	bool inverted = false;
	size_t output_size;

	START(tk_convert);

	// Determine the type of conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, inverted);
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, inverted);
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_UYVY:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;

	default:
		// Unsupported format (return a blank frame)
		assert(0);
		output_size = image->height * output_pitch;
		memset(output_buffer, COLOR_CHROMA_ZERO, output_size);
		break;
	}

	STOP(tk_convert);
}

// Pack three lowpass channels into 8-bit YUYV for the side-by-side (3D) display modes
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer,
							   int output_width, int output_height, int output_pitch, bool inverted)
{
	IMAGE *y_image = images[0];
	IMAGE *u_image = images[1];
	IMAGE *v_image = images[2];

	int width = y_image->width;
	int height = output_height;

	PIXEL *y_row_ptr = y_image->band[0];
	PIXEL *u_row_ptr = u_image->band[0];
	PIXEL *v_row_ptr = v_image->band[0];

	int y_pitch = y_image->pitch/sizeof(PIXEL);
	int u_pitch = u_image->pitch/sizeof(PIXEL);
	int v_pitch = v_image->pitch/sizeof(PIXEL);

	uint8_t *outrow =
			output_buffer;
	uint8_t *outptr;
	int row, column;

	// Definitions for optimization
	//const int column_step = 2 * sizeof(__m64);

	// Column at which post processing must begin
	//int post_column = width - (width % column_step);

	// The output pitch should be a positive number before inversion
	assert(output_pitch > 0);

	// Should the image be inverted?
	if (inverted) {
		outrow += (height - 1) * output_pitch;		// Start at the bottom row
		output_pitch = NEG(output_pitch);			// Negate the pitch to go up
	}

	for (row = 0; row < height; row++)
	{
		outptr = outrow;

		// Fill the rest of the output row
		// Each iteration emits one 4-byte pair of output pixels, averaging two
		// adjacent lowpass samples and descaling by 5 bits before saturating to 8 bits
		for (column = 0; column < width; column+=4)
		{
			int chroma_column = column>>1;
			*(outptr++) = SATURATE_8U((y_row_ptr[column]+y_row_ptr[column+1])>>5);
			*(outptr++) = SATURATE_8U((v_row_ptr[chroma_column]+v_row_ptr[chroma_column+1])>>5);
			*(outptr++) = SATURATE_8U((y_row_ptr[column+2]+y_row_ptr[column+3])>>5);
			*(outptr++) = SATURATE_8U((u_row_ptr[chroma_column]+u_row_ptr[chroma_column+1])>>5);
		}

		// Advance to the next rows in the input and output images
		y_row_ptr += y_pitch;	// 3D Work
		u_row_ptr += u_pitch;
		v_row_ptr += v_pitch;
		outrow += output_pitch;
	}
}

// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels,
							uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info,
							int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
	//IMAGE *image = frame->channel[0];
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;

	// Number of bits to shift off to reduce the encoded precision to 8 bits
	int descale = precision - 8;

	// Get the color format from the decoded format
	int color_format = info->format & COLOR_FORMAT_MASK;

	// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
	assert(0);
	return;
#endif

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RG48:
		if (encode_format == ENCODED_FORMAT_BAYER)
		{
			ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, 2, num_channels);
		}
		else if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			int scale = 1;
			// NOTE(review): this inner test repeats the enclosing condition, so
			// `scale` is always 2 on this path — confirm whether that is intended.
			if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
				scale = 2;
			ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, scale, num_channels);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_RG64:
		if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			assert(0);
		}
		break;

	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
		if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		if (precision == CODEC_PRECISION_10BIT)
		{
			int lineskip = 1;	// 3D Work
			int pitch = output_pitch;

			// Stacked/interleaved 3D modes pack two eyes per frame, so skip lines
			if (decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
			{
				if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
					decoder->channel_blend_type == BLEND_LINE_INTERLEAVED)		// 3d Work
				{
					lineskip = 2;
					if (decoder->channel_blend_type == 3)
						pitch *= 2;
				}
			}

			if ((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
				 decoder->channel_blend_type == BLEND_FREEVIEW) &&
				decoder->frame.format == DECODED_FORMAT_YUYV)	//side by side
			{
				SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height,
										  pitch, inverted);
			}
			else
			{
				//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
				ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height,
											pitch, color_format, inverted, lineskip);
			}
		}
		else
		{
//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted); ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted); } break; #if 0 case DECODED_FORMAT_UYVY_INVERTED: inverted = true; // Fall through and convert to YUV (first image row displayed at the bottom) #endif #if 0 case DECODED_FORMAT_UYVY: ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_UYVY, inverted); break; #endif //#if BUILD_PROSPECT case DECODED_FORMAT_V210: if (precision == CODEC_PRECISION_10BIT) { ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted); } else { //ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted); assert(0); } break; //#endif case DECODED_FORMAT_YU64: // DAN04262004 ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision); break; //#if BUILD_PROSPECT case DECODED_FORMAT_YR16: ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision); break; //#endif default: // Unsupported format (output a blank frame) assert(0); break; } STOP(tk_convert); } void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi, uint8_t *output_buffer, int output_pitch, int frame_width, int format, int colorspace) { bool inverted = false; int output_width = roi.width; #if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0) #error Must set compile-time switches to decode to 8-bit pixels #endif START(tk_convert); #if _ENCODE_CHROMA_OFFSET #error Cannot handle images encoded with a non-zero chroma offset #endif // Determine the type of conversion switch(format) { case DECODED_FORMAT_RGB24: inverted = true; // Fall through and convert to RGB (first 
image row displayed at the bottom) case DECODED_FORMAT_RGB24_INVERTED: ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_RGB24, colorspace, inverted); break; case DECODED_FORMAT_RGB32: inverted = true; // Fall through and convert to RGB (first image row displayed at the bottom) case DECODED_FORMAT_RGB32_INVERTED: ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_RGB32, colorspace, inverted); break; #if 0 case DECODED_FORMAT_YUYV_INVERTED: inverted = true; // Fall through and convert to YUV (first image row displayed at the bottom) #endif case DECODED_FORMAT_YUYV: ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer, output_pitch, frame_width, format); break; #if 0 case DECODED_FORMAT_UYVY_INVERTED: inverted = true; // Fall through and convert to YUV (first image row displayed at the bottom) #endif case DECODED_FORMAT_UYVY: ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_UYVY, colorspace, inverted); break; default: // Unsupported format (output a blank frame) assert(0); break; } STOP(tk_convert); } void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi, uint8_t *output_buffer, int output_pitch, int frame_width, int format, int colorspace) { bool inverted = false; int output_width = roi.width; START(tk_convert); // Determine the type of conversion switch(format) { case DECODED_FORMAT_RGB24: inverted = true; // Fall through and convert to RGB (first image row displayed at the bottom) case DECODED_FORMAT_RGB24_INVERTED: //ConvertPlanarYUVToRGB ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_RGB24, colorspace, inverted); break; case DECODED_FORMAT_RGB32: inverted = true; // Fall through and convert to RGB (first image row displayed 
at the bottom) case DECODED_FORMAT_RGB32_INVERTED: ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_RGB32, colorspace, inverted); break; case COLOR_FORMAT_WP13: case COLOR_FORMAT_B64A: case COLOR_FORMAT_RG48: case COLOR_FORMAT_R210: case COLOR_FORMAT_DPX0: case COLOR_FORMAT_RG30: case COLOR_FORMAT_AR10: case COLOR_FORMAT_AB10: ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, format, colorspace, NULL, NULL); break; case DECODED_FORMAT_YUYV: assert(0);// These routines are not yet updated for ROW16u inputs ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer, output_pitch, frame_width, format); break; case DECODED_FORMAT_UYVY: assert(0);// These routines are not yet updated for ROW16u inputs ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, COLOR_FORMAT_UYVY, colorspace, inverted); break; default: // Unsupported format (output a blank frame) assert(0); break; } STOP(tk_convert); } // Convert one row of packed YUYV to the specified color void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision) { size_t row_size = 2 * length; bool inverted = false; START(tk_convert); // Determine the type of color conversion switch (format) { case DECODED_FORMAT_RGB24: inverted = true; // Fall through and convert to RGB (first image row displayed at the bottom) case DECODED_FORMAT_RGB24_INVERTED: ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision); break; case DECODED_FORMAT_RGB32: inverted = true; // Fall through and convert to RGB (first image row displayed at the bottom) case DECODED_FORMAT_RGB32_INVERTED: ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision); break; case DECODED_FORMAT_YUYV: if(precision == 8) memcpy(output, input, row_size); else { //need to dither 
to 8-bit assert(0); } break; case DECODED_FORMAT_UYVY: if(precision == 8) ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY); else { //need to dither to 8-bit assert(0); } break; //#if BUILD_PROSPECT case DECODED_FORMAT_V210: assert(0); // should get here with 8bit data. //ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210); break; case DECODED_FORMAT_YU64: assert(0); // should get here with 8bit data. //ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64); break; case DECODED_FORMAT_BYR3: case DECODED_FORMAT_BYR4: assert(0); // should get here with 8bit data. //ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64); break; //#endif default: // Unsupported format (output a blank frame) assert(0); memset(output, 0, row_size); break; } STOP(tk_convert); } #if _THREADED_DECODER IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index, int width, int height, int level, int type) { IMAGE *wavelet = transform->wavelet[index]; assert(decoder != NULL && transform != NULL); if (decoder != NULL && transform != NULL) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif // Lock access to the wavelet data #if _DELAYED_THREAD_START==0 Lock(&decoder->entropy_worker_new.lock); #endif // Get the wavelet from the transform data structure (thread safe) wavelet = transform->wavelet[index]; // Allocate (or reallocate) the wavelet #if _ALLOCATOR wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type); #else wavelet = ReallocWaveletEx(wavelet, width, height, level, type); #endif // Save this wavelet in the transform data structure transform->wavelet[index] = wavelet; // Unlock access to the wavelet data #if _DELAYED_THREAD_START==0 Unlock(&decoder->entropy_worker_new.lock); #endif } return wavelet; } // Update the codec state with the information in a tag value pair CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value) { CODEC_ERROR 
error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif bool optional = false; int chunksize = 0; bool result; // Is this an optional tag? if (tag < 0) { tag = NEG(tag); optional = true; } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n", tag, value, optional); } #endif switch (tag) { case CODEC_TAG_ZERO: // Used internally assert(0); // Should not occur in the bitstream error = CODEC_ERROR_INVALID_BITSTREAM; break; case CODEC_TAG_SAMPLE: // Type of sample //assert(0); if (value == SAMPLE_TYPE_CHANNEL) { result = DecodeSampleChannelHeader(decoder, input); if (!result) error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER; else error = CODEC_ERROR_OKAY; } break; case CODEC_TAG_INDEX: // Sample index table //assert(0); // Need to figure out how to return the group index { int count = value; uint32_t *index = (uint32_t *)(&codec->channel_size[0]); DecodeGroupIndex(input, index, count); codec->num_channels = count; } break; case CODEC_TAG_SUBBAND: // Has the decoder encountered a subband? { // This tag is obsolete and not used in modern streams int subband = value; // Check that the subband number makes sense assert(0 <= subband && subband <= codec->max_subband); if (! (0 <= subband && subband <= codec->max_subband)) { error = CODEC_ERROR_DECODING_SUBBAND; break; } // Decompress the subband result = DecodeSampleSubband(decoder, input, subband); if (!result) error = CODEC_ERROR_DECODING_SUBBAND; else error = CODEC_ERROR_OKAY; } break; case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR: // Band divisor. this is last TAG before subband data so act. 
codec->band.divisor = value; // This tag value pair encodes the band divisor which is obsolete { // This tag value pair marks the beginning of the encoded coefficients // The subband number has already been decoded int subband = codec->band.subband; result = DecodeSampleSubband(decoder, input, subband); if (!result) error = CODEC_ERROR_DECODING_SUBBAND; else error = CODEC_ERROR_OKAY; } break; case CODEC_TAG_ENTRY: // Entry in sample index assert(0); // Need to figure out how to return the group index break; case CODEC_TAG_MARKER: // Bitstream marker { int marker = value; uint8_t *current_position; // Save the current bitstream position current_position = GetBitstreamPosition(input); current_position -= 4; // Step back to before the GetSegment i.e. the TAG if (IsLowPassHeaderMarker(marker)) { // Save the bitstream position for the start of the channel codec->channel_position = current_position; } else if (IsLowPassBandMarker(marker)) { int subband = 0; result = DecodeSampleSubband(decoder, input, subband); if (!result) error = CODEC_ERROR_DECODING_SUBBAND; else error = CODEC_ERROR_OKAY; } } break; case CODEC_TAG_VERSION_MAJOR: // Version assert(0); break; case CODEC_TAG_VERSION_MINOR: // Minor version number assert(0); break; case CODEC_TAG_VERSION_REVISION: // Revision number assert(0); break; case CODEC_TAG_VERSION_EDIT: // Edit number assert(0); break; case CODEC_TAG_SEQUENCE_FLAGS: // Video sequence flags assert(0); break; case CODEC_TAG_TRANSFORM_TYPE: // Type of transform assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST); if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST) { int i; codec->transform_type = value; for(i=0;i<TRANSFORM_MAX_CHANNELS;i++) { TRANSFORM *transform = decoder->transform[i]; if(transform) { GetTransformPrescale(transform, codec->transform_type, codec->precision); } } } else error = CODEC_ERROR_TRANSFORM_TYPE; break; case CODEC_TAG_NUM_FRAMES: // Number of frames in the group assert(0 <= value && value <= 
TRANSFORM_NUM_FRAMES); if (0 <= value && value <= TRANSFORM_NUM_FRAMES) codec->num_frames = value; else error = CODEC_ERROR_NUM_FRAMES; break; case CODEC_TAG_NUM_CHANNELS: // Number of channels in the transform assert(value <= CODEC_MAX_CHANNELS); if (value <= CODEC_MAX_CHANNELS) codec->num_channels = value; else error = CODEC_ERROR_NUM_CHANNELS; break; case CODEC_TAG_NUM_WAVELETS: // Number of wavelets in the transform assert(0 < value && value <= TRANSFORM_NUM_WAVELETS); if (0 < value && value <= TRANSFORM_NUM_WAVELETS) codec->num_wavelets = value; else error = CODEC_ERROR_NUM_WAVELETS; break; case CODEC_TAG_NUM_SUBBANDS: // Number of encoded subbands assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS); if (0 < value && value <= TRANSFORM_NUM_SUBBANDS) codec->num_subbands = value; else error = CODEC_ERROR_NUM_SUBBANDS; break; case CODEC_TAG_NUM_SPATIAL: // Number of spatial levels assert(0 < value && value <= TRANSFORM_NUM_SPATIAL); if (0 < value && value <= TRANSFORM_NUM_SPATIAL) codec->num_spatial = value; else error = CODEC_ERROR_NUM_SPATIAL; break; case CODEC_TAG_FIRST_WAVELET: // Type of the first wavelet assert(value == TRANSFORM_FIRST_WAVELET); if (value == TRANSFORM_FIRST_WAVELET) codec->first_wavelet = value; else error = CODEC_ERROR_FIRST_WAVELET; break; case CODEC_TAG_CHANNEL_SIZE: // Number of bytes in each channel assert(0); break; case CODEC_TAG_GROUP_TRAILER: // Group trailer and checksum codec->sample_done = true; break; case CODEC_TAG_FRAME_TYPE: // Type of frame marks the frame start codec->frame.type = value; break; case CODEC_TAG_FRAME_WIDTH: // Width of the frame codec->frame.width = value; break; case CODEC_TAG_FRAME_HEIGHT: // Height of the frame codec->frame.height = value; //DAN20080729 -- Initialize the default colorspace based on clip resolution if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED) { int internalheight = value; int internalwidth = codec->frame.width; if(decoder->codec.encoded_format == 
ENCODED_FORMAT_BAYER) { internalwidth *= 2; internalheight *= 2; } if(internalheight > 576 || internalwidth > 720) decoder->frame.colorspace |= COLOR_SPACE_CG_709; else decoder->frame.colorspace |= COLOR_SPACE_CG_601; } //if(decoder->frame.colorspace_filedefault) // decoder->frame.colorspace = decoder->frame.colorspace_filedefault; if(decoder->frame.colorspace_override) decoder->frame.colorspace = decoder->frame.colorspace_override; break; case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729 if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709, //there was a bug in 3.9.4 that had bayer flagged as 601. if(decoder->frame.colorspace_override) decoder->frame.colorspace = decoder->frame.colorspace_override; else { if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422) { decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709); decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709)); //Let the VSRGB status be controllable by the calling application (e.g. 
Vegas) } else { decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB); decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB)); } } decoder->frame.colorspace_filedefault = value; break; case CODEC_TAG_FRAME_FORMAT: // Format of the encoded pixels (GRAY, YUV, RGB, RGBA) assert(0); break; case CODEC_TAG_INPUT_FORMAT: // Format of the original pixels codec->input_format = value; // Set the encoded format if it has not already been set // error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value); break; case CODEC_TAG_ENCODED_FORMAT: // Internal format of the encoded data case CODEC_TAG_OLD_ENCODED_FORMAT: codec->encoded_format = value; if(codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3) codec->encoded_format = ENCODED_FORMAT_RGB_444; break; case CODEC_TAG_FRAME_INDEX: // Position of frame within the group codec->frame.group_index = value; break; case CODEC_TAG_FRAME_TRAILER: // Frame trailer and checksum codec->sample_done = true; break; case CODEC_TAG_LOWPASS_SUBBAND: // Subband number of the lowpass band codec->lowpass.subband = value; error = SetDefaultEncodedFormat(codec); break; case CODEC_TAG_NUM_LEVELS: // Number of wavelet levels codec->lowpass.level = value; break; case CODEC_TAG_LOWPASS_WIDTH: // Width of the lowpass band codec->lowpass.width = value; break; case CODEC_TAG_LOWPASS_HEIGHT: // Height of the lowpass band codec->lowpass.height = value; break; case CODEC_TAG_MARGIN_TOP: // Margins that define the encoded subset codec->lowpass.margin.top = value; break; case CODEC_TAG_MARGIN_BOTTOM: codec->lowpass.margin.bottom = value; break; case CODEC_TAG_MARGIN_LEFT: codec->lowpass.margin.left = value; break; case CODEC_TAG_MARGIN_RIGHT: codec->lowpass.margin.right = value; break; case CODEC_TAG_PIXEL_OFFSET: // Quantization parameters codec->lowpass.pixel_offset = value; break; case CODEC_TAG_QUANTIZATION: // Quantization divisor used during encoding codec->lowpass.quantization = value; break; case CODEC_TAG_PIXEL_DEPTH: // Number 
of bits per pixel codec->lowpass.bits_per_pixel = value; break; case CODEC_TAG_LOWPASS_TRAILER: // Lowpass trailer assert(0); break; case CODEC_TAG_WAVELET_TYPE: // Type of wavelet codec->highpass.wavelet_type = value; break; case CODEC_TAG_WAVELET_NUMBER: // Number of the wavelet in the transform codec->highpass.wavelet_number = value; break; case CODEC_TAG_WAVELET_LEVEL: // Level of the wavelet in the transform codec->highpass.wavelet_level = value; break; case CODEC_TAG_NUM_BANDS: // Number of wavelet bands codec->highpass.num_bands = value; break; case CODEC_TAG_HIGHPASS_WIDTH: // Width of each highpass band codec->highpass.width = value; break; case CODEC_TAG_HIGHPASS_HEIGHT: // Height of each highpass band codec->highpass.height = value; break; case CODEC_TAG_LOWPASS_BORDER: // Dimensions of lowpass border (obsolete) codec->highpass.lowpass_border = value; break; case CODEC_TAG_HIGHPASS_BORDER: // Dimensions of highpass border (obsolete) codec->highpass.highpass_border = value; break; case CODEC_TAG_LOWPASS_SCALE: // Scale factor for lowpass band codec->highpass.lowpass_scale = value; break; case CODEC_TAG_LOWPASS_DIVISOR: // Divisor for the lowpass band codec->highpass.lowpass_divisor = value; break; case CODEC_TAG_HIGHPASS_TRAILER: // Highpass trailer assert(0); break; case CODEC_TAG_BAND_NUMBER: // Identifying number of a wavelet band codec->band.number = value; break; case CODEC_TAG_BAND_WIDTH: // Band data width codec->band.width = value; break; case CODEC_TAG_BAND_HEIGHT: // Band data height codec->band.height = value; break; case CODEC_TAG_BAND_SUBBAND: // Subband number of this wavelet band codec->band.subband = value; //assert(value != 255); break; case CODEC_TAG_BAND_ENCODING: // Encoding method for this band codec->band.encoding = value; break; case CODEC_TAG_BAND_QUANTIZATION: // Quantization applied to band codec->band.quantization = value; break; case CODEC_TAG_BAND_SCALE: // Band scale factor codec->band.scale = value; break; case 
CODEC_TAG_BAND_TRAILER: // Band trailer assert(0); break; case CODEC_TAG_NUM_ZEROVALUES: // Number of zero values assert(0); break; case CODEC_TAG_NUM_ZEROTREES: // Number of zerotrees assert(0); break; case CODEC_TAG_NUM_POSITIVES: // Number of positive values assert(0); break; case CODEC_TAG_NUM_NEGATIVES: // Number of negative values assert(0); break; case CODEC_TAG_NUM_ZERONODES: // Number of zerotree nodes assert(0); break; case CODEC_TAG_CHANNEL: // Channel number assert(0); break; case CODEC_TAG_INTERLACED_FLAGS: // Interlaced structure of the video stream //assert(0); break; //assert(0); case CODEC_TAG_PROTECTION_FLAGS: // Copy protection bits //assert(0); break; case CODEC_TAG_PICTURE_ASPECT_X: // Numerator of the picture aspect ratio codec->picture_aspect_x = value; //assert(0); break; case CODEC_TAG_PICTURE_ASPECT_Y: // Denominator of the picture aspect ratio codec->picture_aspect_y = value; //assert(0); break; case CODEC_TAG_SAMPLE_FLAGS: // Flag bits that control sample decoding // Progressive versus interlaced decoding is specified by the sample flags error = UpdateCodecFlags(codec, value); break; case CODEC_TAG_FRAME_NUMBER: // Sequence number of the frame in the bitstream codec->frame_number = value; break; // This TAG is now support as part of the universal decoder. // Only Prospect HD builds can decode 10bit. 
case CODEC_TAG_PRECISION: // Number of bits in the video source codec->precision = value; { int i; for(i=0;i<TRANSFORM_MAX_CHANNELS;i++) { TRANSFORM *transform = decoder->transform[i]; if(transform) { GetTransformPrescale(transform, codec->transform_type, codec->precision); } } } break; case CODEC_TAG_PRESCALE_TABLE: { int i; int prescale[TRANSFORM_MAX_WAVELETS] = {0}; for(i=0;i<TRANSFORM_MAX_WAVELETS;i++) prescale[i] = value >> (14-i*2) & 0x3; for(i=0;i<TRANSFORM_MAX_CHANNELS;i++) { TRANSFORM *transform = decoder->transform[i]; if(transform) { memcpy(transform->prescale, prescale, sizeof(prescale)); } } } break; case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP. codec->version[0] = (value>>12) & 0xf; codec->version[1] = (value>>8) & 0xf; codec->version[2] = value & 0xff; break; case CODEC_TAG_QUALITY_L: // codec->encode_quality &= 0xffff0000; codec->encode_quality |= value; break; case CODEC_TAG_QUALITY_H: // codec->encode_quality &= 0xffff; codec->encode_quality |= value<<16; break; case CODEC_TAG_BAND_CODING_FLAGS: codec->active_codebook = value & 0xf; // 0-15 valid code books codec->difference_coding = (value>>4) & 1; break; // Peak table processing case CODEC_TAG_PEAK_TABLE_OFFSET_L: codec->peak_table.offset &= ~0xffff; codec->peak_table.offset |= (value & 0xffff); codec->peak_table.base = (PIXEL *)(input->lpCurrentWord); codec->peak_table.level = 0; // reset for the next subband break; case CODEC_TAG_PEAK_TABLE_OFFSET_H: codec->peak_table.offset &= 0xffff; codec->peak_table.offset |= (value & 0xffff)<<16; codec->peak_table.level = 0; // reset for the next subband break; case CODEC_TAG_PEAK_LEVEL: codec->peak_table.level = value; codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL); break; case CODEC_TAG_PEAK_TABLE: //this is the chunk header, so we have peak data codec->peak_table.level = 0; // reset for the next subband //Just skip as the data was read ahead chunksize = value; chunksize &= 0xffff; input->lpCurrentWord += 
chunksize*4; input->nWordsUsed -= chunksize*4; break; #if (1 && DEBUG) case CODEC_TAG_SAMPLE_END: // Marks the end of the sample (for debugging only) assert(0); break; #endif default: // Unknown tag if(tag & 0x4000) { if(tag & 0x2000) // i.e. 0x6xxx = 24bit size. { chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); } else // 16bit size { chunksize = value; chunksize &= 0xffff; } } else if(tag & 0x2000) //24bit LONGs chunk size { optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not // updated the size and turned the tag to optional. TODO : WHY chunksize = 0; // not not skip // chunksize = value + ((tag & 0xff)<<16); // do not skip an unknown but optional chunk // These are only use to size subbands, but the data within should not be skipped // unless if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS) { optional = true; chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord; decoder->uncompressed_size = chunksize*4; decoder->sample_uncompressed = 1; } } assert(optional); if(!optional) { error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG; } else if(chunksize > 0) // skip this option chunk { input->lpCurrentWord += chunksize*4; input->nWordsUsed -= chunksize*4; } break; } return error; } void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band) { assert(decoder != NULL); assert(wavelet != NULL); if (decoder != NULL && wavelet != NULL) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif #if _THREADED_DECODER // Lock access to the wavelet data if(decoder->entropy_worker_new.pool.thread_count) Lock(&decoder->entropy_worker_new.lock); #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n", wavelet->band_valid_flags, BAND_VALID_MASK(band)); } #endif // Update the wavelet band flags wavelet->band_valid_flags |= BAND_VALID_MASK(band); wavelet->band_started_flags |= 
BAND_VALID_MASK(band); #if _THREADED_DECODER // Unlock access to the wavelet data if(decoder->entropy_worker_new.pool.thread_count) Unlock(&decoder->entropy_worker_new.lock); #endif } } void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band) { assert(decoder != NULL); assert(wavelet != NULL); if (decoder != NULL && wavelet != NULL) { // Update the wavelet band flags #if _DELAYED_THREAD_START==0 if(decoder->entropy_worker_new.pool.thread_count) Lock(&decoder->entropy_worker_new.lock); #endif wavelet->band_started_flags |= BAND_VALID_MASK(band); #if _DELAYED_THREAD_START==0 if(decoder->entropy_worker_new.pool.thread_count) Unlock(&decoder->entropy_worker_new.lock); #endif } } bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type) { uint32_t threaded_band_mask; uint32_t wavelet_band_mask; uint32_t decoded_band_mask; bool decoded_bands_valid; // Has this wavelet been created? if (wavelet == NULL) { // Too soon to wait for the wavelet bands to be decoded return false; } // Is this a fieldplus transform? if (transform_type == TRANSFORM_TYPE_FIELDPLUS) { // Is this the temporal wavelet? if (index == 2) { assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL); assert(wavelet->num_bands == 2); // Earlier transforms in the queue will compute both wavelet bands return true; } // Is this wavelet at the end of a chain of transforms? if (index == 3 || index == 5) { // Must wait for all bands to be decoded threaded_band_mask = 0; } else { // The lowpass band will be computed by transforms earlier in the queue threaded_band_mask = BAND_VALID_MASK(0); } } // Is this a spatial transform? else if (transform_type == TRANSFORM_TYPE_SPATIAL) { // Is this wavelet at the top of the pyramid? if (index == 2) { // Must wait for all bands to be decoded threaded_band_mask = 0; } #if 0 // Is this wavelet at the bottom of the pyramid? 
else if (index == 0) { // Must wait for all bands to be decoded threaded_band_mask = 0; } #endif else { // The lowpass band will be computed by transforms earlier in the queue threaded_band_mask = BAND_VALID_MASK(0); } } else { // Unknown type of transform assert(0); // Assume that the bands are not valid return false; } // Compute the mask for the bands in this wavelet decoded_band_mask = ((1 << wavelet->num_bands) - 1); // Clear the bit for the band computed by the threaded transform decoded_band_mask &= ~threaded_band_mask; // Compute the wavelet bands that have been decoded wavelet_band_mask = (wavelet->band_valid_flags & decoded_band_mask); // Have all of the bands not computed by the transform thread been decoded? decoded_bands_valid = (wavelet_band_mask == decoded_band_mask); return decoded_bands_valid; } void QueueThreadedTransform(DECODER *decoder, int channel, int index) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; TRANSFORM *transform = decoder->transform[channel]; //IMAGE *wavelet = transform->wavelet[index]; int precision = codec->precision; // The transform data structure must exist assert(transform != NULL); // The transform thread variables should have been created { int free_entry; #if _DELAYED_THREAD_START==0 // Lock access to the transform queue Lock(&decoder->entropy_worker_new.lock); #endif // Copy the transform parameters into the next queue entry free_entry = decoder->transform_queue.free_entry; assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH); if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH) { assert(transform != NULL); assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS); assert(0 <= index && index < TRANSFORM_MAX_WAVELETS); // Note: The wavelet may not exist when the transform is queued decoder->transform_queue.queue[free_entry].transform = transform; decoder->transform_queue.queue[free_entry].channel = channel; decoder->transform_queue.queue[free_entry].index 
= index; decoder->transform_queue.queue[free_entry].precision = precision; decoder->transform_queue.queue[free_entry].done = 0; // Update the transform request queue decoder->transform_queue.free_entry++; decoder->transform_queue.num_entries++; #if (1 && DEBUG) if (logfile) { fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index); } #endif } #if _DELAYED_THREAD_START==0 Unlock(&decoder->entropy_worker_new.lock); #endif } } #if _THREADED_DECODER void WaitForTransformThread(DECODER *decoder) { if(decoder->entropy_worker_new.pool.thread_count) { #if _DELAYED_THREAD_START ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START); #endif ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool); decoder->transform_queue.started = 0; decoder->transform_queue.num_entries = 0; decoder->transform_queue.next_entry = 0; decoder->transform_queue.free_entry = 0; } } #endif #endif #if _INTERLACED_WORKER_THREADS void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels, uint8_t *output, int pitch, FRAME_INFO *info, int chroma_offset, int precision) { int32_t lPreviousCount,i; // There are half as many input rows as output rows int transform_height = (((info->height+7)/8)*8) / 2; int middle_row_count = transform_height; // Post a message to the mailbox struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data; mailbox->type = THREAD_TRANSFORM_FRAME_YUV; mailbox->frame = frame_index; mailbox->num_channels = num_channels; mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->chroma_offset = chroma_offset; mailbox->precision = precision; // Set the semaphore to the number of rows decoder->interlaced_worker.current_row = 0; ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount); assert(lPreviousCount == 0); // Wake up both worker threads for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { 
SetEvent(decoder->interlaced_worker.start_event[i]); } // Wait for both worker threads to finish WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE); } void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels, PIXEL16U *output, int pitch, FRAME_INFO *info, int chroma_offset, int precision) { int32_t lPreviousCount,i; // There are half as many input rows as output rows int transform_height = (((info->height+7)/8)*8) / 2; int middle_row_count = transform_height; // Post a message to the mailbox struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data; mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U; mailbox->frame = frame_index; mailbox->num_channels = num_channels; mailbox->output = (uint8_t *)output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->chroma_offset = chroma_offset; mailbox->precision = precision; // Set the semaphore to the number of rows decoder->interlaced_worker.current_row = 0; ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount); assert(lPreviousCount == 0); // Wake up both worker threads for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { SetEvent(decoder->interlaced_worker.start_event[i]); } // Wait for both worker threads to finish WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE); } DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam) { DECODER *decoder = (DECODER *)lpParam; FILE *logfile = decoder->logfile; struct interlace_data *data = &decoder->interlaced_worker.interlace_data; int thread_index; HANDLE hObjects[2]; DWORD dwReturnValue; if(decoder->thread_cntrl.affinity) { HANDLE hCurrentThread = GetCurrentThread(); SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity); } // Set the handler for system exceptions #ifdef _WIN32 SetDefaultExceptionHandler(); #endif // Determine the index of this worker 
thread if(decoder->interlaced_worker.lock_init) { EnterCriticalSection(&decoder->interlaced_worker.lock); } thread_index = decoder->interlaced_worker.thread_count++; if(decoder->interlaced_worker.lock_init) LeaveCriticalSection(&decoder->interlaced_worker.lock); // The transform worker variables should have been created assert(decoder->interlaced_worker.start_event[thread_index] != NULL); assert(decoder->interlaced_worker.row_semaphore != NULL); assert(decoder->interlaced_worker.done_event[thread_index] != NULL); assert(decoder->interlaced_worker.stop_event != NULL); if (!(decoder->interlaced_worker.start_event[thread_index] != NULL && decoder->interlaced_worker.row_semaphore != NULL && decoder->interlaced_worker.done_event[thread_index] != NULL && decoder->interlaced_worker.stop_event != NULL)) { return 1; } hObjects[0] = decoder->interlaced_worker.start_event[thread_index]; hObjects[1] = decoder->interlaced_worker.stop_event; for (;;) { // Wait for the signal to begin processing a transform dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE); // Received a signal to begin inverse transform processing? 
		if (dwReturnValue == WAIT_OBJECT_0)
		{
			int type;				// Type of inverse transform to perform
			int frame_index;		// Index of output frame to produce
			int num_channels;		// Number of channels in the transform array
			uint8_t *output;		// Output frame buffer
			int pitch;				// Output frame pitch
			FRAME_INFO info;		// Format of the output frame
			int chroma_offset;		// Offset for the output chroma
			int precision;			// Source pixel bit depth

			// Lock access to the transform data
			// NOTE(review): `data` is the per-thread message block declared before this
			// chunk — assumed to be the interlaced worker mailbox; confirm against the
			// function header above this excerpt.
			if(decoder->interlaced_worker.lock_init) {
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}

			// Get the processing parameters (copied under the lock so the producer
			// can reuse the mailbox as soon as we release it)
			type = data->type;
			frame_index = data->frame;
			num_channels = data->num_channels;
			output = data->output;
			pitch = data->pitch;
			memcpy(&info, &data->info, sizeof(FRAME_INFO));
			chroma_offset = data->chroma_offset;
			precision = data->precision;

			// Unlock access to the transform data
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			// Select the type of inverse transform to perform
			switch (type)
			{
			case THREAD_TRANSFORM_FRAME_YUV:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
												  output, pitch, &info, chroma_offset, precision);
				break;

			case THREAD_TRANSFORM_FRAME_ROW16U:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
													 (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
				break;

			default:
				// Unknown transform type requested
				assert(0);
				break;
			}

			// Signal that this thread is done
			SetEvent(decoder->interlaced_worker.done_event[thread_index]);
		}
		else
		{
			// Should have a condition that causes the thread to terminate
			assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
			break;
		}
	}

	return 0;
}
#endif

// Compute the decoded frame dimensions for the requested resolution by scaling
// the dimensions of the wavelet that would be decoded at that resolution.
// Either output pointer may be NULL if that dimension is not wanted.
void GetDecodedFrameDimensions(TRANSFORM **transform_array, int num_channels, int frame_index,
							   int resolution, int *decoded_width_out, int *decoded_height_out)
{
	IMAGE *wavelet = NULL;
	int decoded_scale = 0;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;

	// Get the decoding scale (which wavelet level to read and how much to scale it)
	switch(resolution)
	{
	case DECODED_RESOLUTION_FULL_DEBAYER:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_FULL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[3];
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		// Is this an intra frame?
		if (wavelet == NULL) {
			wavelet = transform_array[0]->wavelet[2];
		}
		break;

	default:
		// Unsupported resolution
		assert(0);
		break;
	}

	// Compute the decoded frame dimensions
	assert(wavelet != NULL);
	wavelet_width = wavelet->width;
	wavelet_height = wavelet->height;

	decoded_width = decoded_scale * wavelet_width;
	decoded_height = decoded_scale * wavelet_height;

	if (decoded_width_out) {
		*decoded_width_out = decoded_width;
	}
	if (decoded_height_out) {
		*decoded_height_out = decoded_height;
	}
}

// Reconstruct Bayer format to the requested output format.
// Consumes decoder->uncompressed_chunk / uncompressed_size (cleared on the
// fast-path returns) and writes the decoded frame into output_buffer.
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
												 uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int precision = codec->precision;
	int format = info->format;
	int width = info->width;
	int height = info->height;
	//int resolution = info->resolution;
	//
	// Compute the number of bytes between each row of Bayer data
	//int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	// Compute the pitch between pairs of rows of bayer data (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;
	//int chroma_offset = decoder->codec.chroma_offset;

	// Whitelist of output formats this routine can produce; BYR2/BYR3/BYR4
	// are converted immediately and return early.
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here.
	case DECODED_FORMAT_WP13: //DAN20090120 ""
	case DECODED_FORMAT_W13A: //DAN20101207 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
	case DECODED_FORMAT_YUYV: //?
	case DECODED_FORMAT_UYVY: //?
	case DECODED_FORMAT_R408:
	case DECODED_FORMAT_V408:
		error = CODEC_ERROR_OKAY;
		break;

	case DECODED_FORMAT_BYR2:
	case DECODED_FORMAT_BYR4:
		{
			//bool linearRestore = false;
			unsigned short *curve = NULL;
			// Optional linear-restore curve applies only to BYR4 output with no encode curve preset
			if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
			{
				curve = decoder->BYR4LinearRestore;
			}
			ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
		}
		// The uncompressed chunk has been consumed
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
		break;

	case DECODED_FORMAT_BYR3:
		ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
		break;
	}

	if(error)
		return error;

	//int row;
	//int column;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		frame_size = width * height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// Three planes (or four when alpha is carried through) of filter scratch
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// NOTE(review): recorded size is frame_size*3 even when frame_size*4 was
			// allocated for the alpha case — presumably intentional (alpha tail not
			// tracked), but verify.
			decoder->RGBFilterBufferSize = frame_size*3;
		}
	}

	// Using the RGBFilterBuffer16 as scratch space
	ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size,
							  decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
	decoder->uncompressed_chunk = 0;
	decoder->uncompressed_size = 0;

#if _THREADED
	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc, decoder);
		}
#endif
		// RGB24/RGB32 are stored bottom-up; switch to the inverted variants and
		// flip the output pointer/pitch below
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0)
		{
			int height = info->height;
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	// No single-threaded demosaic path is implemented
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}

// Reconstruct uncompressed v210 YUV format to the requested output format.
// Fast paths below copy or repack directly; otherwise the rows are expanded
// through the ActiveMetadata engine.
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
											   uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int precision = codec->precision;
	int format = info->format;
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;

	// Compute the number of bytes between each row of Bayer data
	//int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	// Compute the pitch between pairs of rows of bayer data (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;
	//int chroma_offset = decoder->codec.chroma_offset;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Fast path: v210 in, v210 out at full resolution with no active metadata — row copy
	if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		if(unc_Stride == output_pitch)
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		else
		{
			// Strides differ: copy the common prefix of each row
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	// Fast path: v210 in, 8-bit YUYV/UYVY out at full resolution with no active metadata
	if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;
		{
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				uint32_t *input_ptr = (uint32_t *)src;
				int pos = 0;
				int column=0,length = width;
				length -= length % 6; //DAN03252004 -- fix a memory overflow.

				// v210 packs six pixels (4 words) per group; unpack to YUYV bytes
				for (column=0; column < length; column += 6)
				{
					uint32_t yuv;
					int y;	// NOTE(review): shadows the row counter `y` above
					int u;
					int v;

					// Read the first word
					yuv = *(input_ptr++);
					u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;

					// Expand the pixels to sixteen bits
					u <<= 6;
					y <<= 6;
					v <<= 6;

					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					// Read the second word
					yuv = *(input_ptr++);
					y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;

					u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					u <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					// Read the third word
					yuv = *(input_ptr++);
					v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					v <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;

					u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					u <<= 6;

					// Read the fourth word
					yuv = *(input_ptr++);
					y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(u)>>8;

					v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
					y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
					v <<= 6;
					y <<= 6;
					dst[pos++] = SATURATE_16U(y)>>8;
					dst[pos++] = SATURATE_16U(v)>>8;
				}

				// UYVY is YUYV with luma/chroma bytes swapped
				if(format == DECODED_FORMAT_UYVY)
				{
					for (column=0; column < pos; column += 2)
					{
						int t = dst[column];
						dst[column] = dst[column+1];
						dst[column+1] = t;
					}
				}

				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand YUV at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
		{
			//int pixel_size = 2 * sizeof(PIXEL16U);
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			// Scratch is sized from the source (pre-downsample) width
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			// Free any undersized buffer before reallocating
			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#endif
			}

#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// unpack source original YUV into YU64?
	if(decoder->RawBayer16)
	{
		//uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
		//uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc, decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		// NOTE(review): this non-threaded path references `src` and `dst`, whose
		// declarations above are commented out — it cannot compile when _THREADED
		// is disabled. Presumably dead code; verify before enabling.
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			// RGB formats are written bottom-up
			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
			{
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				int whitebitdepth = 16;
				int flags = 0;
				uint8_t *planar_output[3];
				int planar_pitch[3];
				ROI roi;
				PIXEL16U *y_row_ptr;
				PIXEL16U *u_row_ptr;
				PIXEL16U *v_row_ptr;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				y_row_ptr = (PIXEL16U *)scanline;
				u_row_ptr = y_row_ptr + orig_width;
				v_row_ptr = u_row_ptr + orig_width/2;

				for(i=0; i<lines; i++)
				{
					src = (uint8_t *)decoder->uncompressed_chunk;
					src += row * unc_Stride;

					// Repack the row of 10-bit pixels into 16-bit pixels
					ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);

					// Advance to the next rows in the input and output images
					y_row_ptr += orig_width*2;
					u_row_ptr = y_row_ptr + orig_width;
					v_row_ptr = u_row_ptr + orig_width/2;
				}

				y_row_ptr = (PIXEL16U *)scanline;
				u_row_ptr = y_row_ptr + width;
				v_row_ptr = u_row_ptr + width/2;

				// Box-average the gathered rows down to the target width
				if(lines == 2)
				{
					for(i=0; i<width*2;i++)
						y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
				}
				else if(lines == 4)
				{
					for(i=0; i<width*2;i++)
						y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
				}

				roi.width = width;
				roi.height = 1;

				planar_output[0] = (uint8_t *)y_row_ptr;
				planar_output[1] = (uint8_t *)v_row_ptr;
				planar_output[2] = (uint8_t *)u_row_ptr;
				planar_pitch[0] = 0;
				planar_pitch[1] = 0;
				planar_pitch[2] = 0;

				if(decoder->apply_color_active_metadata)
				{
					ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
											 (unsigned char *)scanline2, width, output_pitch,
											 COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace,
											 &whitebitdepth, &flags);
					sptr = scanline2;
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
											   info->format, &whitebitdepth, &flags);
				}
				else
				{
					ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
											 (unsigned char *)scanline2, width,
											 output_pitch, COLOR_FORMAT_WP13, decoder->frame.colorspace,
											 &whitebitdepth, &flags);
					sptr = scanline2;
				}

				ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch,
									 format, whitebitdepth, flags);
				dst += output_pitch;
			}
		}
#endif
	}

	error = CODEC_ERROR_OKAY;
	return error;
}

// Reconstruct uncompressed DPX0 RGB format to the requested output format.
// 10-bit RGB formats at full resolution pass through (with an in-place repack
// for the non-DPX0 variants); everything else is expanded row by row through
// the ActiveMetadata engine.
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
											   uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int precision = codec->precision;
	int format = info->format;
	//int output_format = info->output_format; // used by image_dev_only decodes
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Fast path: 10-bit RGB out at full resolution with no active metadata
	if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		if(format != DECODED_FORMAT_DPX0)
		{
			// Repack the DPX0 source in place to the requested 10-bit layout
			int unc_Stride = decoder->uncompressed_size / height;	// NOTE(review): shadows outer unc_Stride
			ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
		}

		if(unc_Stride == output_pitch)
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		else
		{
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand YUV at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
		{
			//int pixel_size = 2 * sizeof(PIXEL16U);
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
#endif
			}

#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// unpack source original YUV into YU64?
	if(decoder->RawBayer16)
	{
		//uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
		//uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc, decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		// NOTE(review): this non-threaded path references `dst`, whose declaration
		// above is commented out — it cannot compile when _THREADED is disabled.
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit
			{
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				int whitebitdepth = 16;
				int flags = 0;
				uint8_t *planar_output[3];
				int planar_pitch[3];
				ROI roi;
				PIXEL16U *y_row_ptr;
				PIXEL16U *u_row_ptr;
				PIXEL16U *v_row_ptr;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				whitebitdepth = 13;
				if(decoder->apply_color_active_metadata)
					flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
				else
					flags = 0;

				roi.width = width;
				roi.height = 1;

				// Full resolution: unpack one 10-bit RGB row to 13-bit planar or interleaved
				if(lines == 1)
				{
					uint16_t *sptr;	// NOTE(review): shadows outer sptr
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2);
					sptr = (uint16_t *)lptr;

					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8; j++)
								{
									ptr[j] = sptr[0] >> 3;
									ptr[j+8] = sptr[1] >> 3;
									ptr[j+16] = sptr[2] >> 3;
									sptr += 3;
								}
							}
							else
							{
								// Big-endian DPX word: 10-bit R,G,B packed high to low
								for(j=0; j<8; j++)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+8] = g;
									ptr[j+16] = b;
								}
							}
						}
						else
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
{ for(j=0; j<8*3; j+=3) { ptr[j] = sptr[0] >> 3; ptr[j+1] = sptr[1] >> 3; ptr[j+2] = sptr[2] >> 3; sptr += 3; } } else { for(j=0; j<8*3; j+=3) { val = SwapInt32(*lptr++); val >>= 2; b = (val & 0x3ff) << 3; val >>= 10; g = (val & 0x3ff) << 3; val >>= 10; r = (val & 0x3ff) << 3; ptr[j] = r; ptr[j+1] = g; ptr[j+2] = b; } } } ptr += 24; } } else if(lines == 2) { uint32_t j,*lptr = (uint32_t)decoder->uncompressed_chunk; PIXEL16U *ptr = (PIXEL16U *)scanline; lptr += row * (unc_Stride>>2) * lines; for(i=0; i<width;i+=8) { int val,r,g,b,r2,g2,b2,r3,g3,b3,r4,g4,b4; for(j=0; j<8; j++) { val = SwapInt32(lptr[0]); val >>= 2; b = (val & 0x3ff) << 3; val >>= 10; g = (val & 0x3ff) << 3; val >>= 10; r = (val & 0x3ff) << 3; val = SwapInt32(lptr[1]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; val = SwapInt32(lptr[unc_Stride>>2]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; val = SwapInt32(lptr[(unc_Stride>>2)+1]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR) { ptr[j] = r>>2; ptr[j+8] = g>>2; ptr[j+16] = b>>2; } else { ptr[j*3] = r>>2; ptr[j*3+1] = g>>2; ptr[j*3+2] = b>>2; } lptr += lines; } ptr += 24; } } else if(lines == 4) { uint32_t j,*lptr = (uint32_t)decoder->uncompressed_chunk; PIXEL16U *ptr = (PIXEL16U *)scanline; lptr += row * (unc_Stride>>2) * lines; for(i=0; i<width;i+=8) { int val,r,g,b,r2,g2,b2,r3,g3,b3,r4,g4,b4; for(j=0; j<8; j++) { val = SwapInt32(lptr[0]); val >>= 2; b = (val & 0x3ff) << 3; val >>= 10; g = (val & 0x3ff) << 3; val >>= 10; r = (val & 0x3ff) << 3; val = SwapInt32(lptr[2]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; val = SwapInt32(lptr[unc_Stride>>1]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 
							       0x3ff) << 3;

							val = SwapInt32(lptr[(unc_Stride>>1)+2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;

							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}

				// Apply active metadata (if enabled) and convert the row to the output format
				sptr = scanline;
				if(decoder->apply_color_active_metadata)
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2, info->format, &whitebitdepth, &flags);

				ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch, format, whitebitdepth, flags);
				dst += output_pitch;
			}
		}
#endif
	}

	error = CODEC_ERROR_OKAY;
	return error;
}

// Reconstruct Bayer format to the requested output format.
// Dispatches to the per-resolution subroutine; only the debayer-to-full paths
// are currently implemented — the rest return CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;
	//TRANSFORM **transform_array = decoder->transform;
	int resolution = info->resolution;
	//int format = info->format;

	// Switch to the subroutine for the requested resolution
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL_DEBAYER:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		//error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);
		break;

	case DECODED_RESOLUTION_FULL:
		//return ReconstructSampleFrameBayerFullToBuffer(decoder, info, frame, output, pitch);
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	//case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
		//return ReconstructSampleFrameBayerHalfToBuffer(decoder, info, frame, output, pitch);
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_RESOLUTION_QUARTER:
		//return ReconstructSampleFrameBayerQuarterToBuffer(decoder, frame, output, pitch);
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		// The decoded resolution is not supported by this routine
		assert(0);
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data to full resolution.
// NOTE(review): all output formats below are stubbed out (the conversion calls
// are commented), so this routine currently always returns
// CODEC_ERROR_UNSUPPORTED_FORMAT; only the scratch buffers are allocated.
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;
	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	int format = info->format;
	//int width = info->width;
	//int height = info->height;

	// Compute the number of bytes between each row of Bayer data
	//int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	// Compute the pitch between pairs of rows of bayer data (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;
	//int chroma_offset = decoder->codec.chroma_offset;

	//int row;
	//int column;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		//int format = info->format;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

//#ifdef SHARPENING
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RGBFilterBufferSize = frame_size*3;
		}
//#endif
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;

		// Decode the last transform to rows of Bayer data (one row per channel)
		// TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//								   decoder->RawBayer16, raw_bayer_pitch, info,
		//								   &decoder->scratch, chroma_offset, precision);

		// ConvertPackedBayerToRGB32(decoder->RawBayer16, info, bayer_pitch,
		//							 output_buffer, output_pitch,
		//							 width, height);
		break;

	case DECODED_FORMAT_RGB24:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;

		// Decode the last transform to rows of Bayer data (one row per channel)
		//TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//								  decoder->RawBayer16, raw_bayer_pitch, info,
		//								  &decoder->scratch, chroma_offset, precision);

		//ConvertPackedBayerToRGB24(decoder->RawBayer16, info, bayer_pitch,
		//							output_buffer, output_pitch,
		//							width, height);
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data and demosaic to full resolution.
// Decodes the transforms to rows of Bayer data in a scratch buffer and then
// demosaics to the output buffer on the worker thread pool.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	int precision = codec->precision;
	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	int format = info->format;
	int width = info->width;
	//int height = info->height;

	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);

	// Compute the pitch between pairs of rows of bayer data
	// (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;

	int chroma_offset = decoder->codec.chroma_offset;

	// Assume the output format is unsupported until it is found in the list below
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48:	//DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13:	//DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}
	if(error)
		return error;

	//int row;
	//int column;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		//int format = info->format;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif

		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

//#ifdef SHARPENING
		// Also allocate the interim RGB filter buffer used by the demosaic pass
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;
			// Four channels are needed when decoding RGBA with alpha output
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// NOTE(review): records frame_size*3 even when frame_size*4 was
			// allocated above -- confirm the bookkeeping size is intentional.
			decoder->RGBFilterBufferSize = frame_size*3;
		}
//#endif
	}

#if _THREADED
	// Decode the last transform to rows of packed Bayer data (one row per channel)
	// NOTE(review): bayer_pitch already includes sizeof(PIXEL16U), so scaling by
	// sizeof(PIXEL) here applies the element size twice -- verify against the
	// pitch units expected by the callee.
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
													 (uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
													 chroma_offset, precision);

	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc,
							 decoder);
		}
#endif

		// RGB24 and RGB32 are stored bottom-up, so decode the inverted variant
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0)
		{
			int height = info->height;

			// Full and half-horizontal debayer resolutions output twice the rows
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;

			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	// The demosaic pass is only implemented for the threaded build
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}

// Reconstruct Bayer encoded data to half resolution
//
// Converts the planar lowpass Bayer bands (G1, RG, BG and optionally G2) for
// the requested frame directly into the output buffer.  Only
// DECODED_FORMAT_RGB32 is currently supported.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
													uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;
	TRANSFORM **transform_array = decoder->transform;
	int frame_width = info->width;
	int frame_height = info->height;
	//int resolution = info->resolution;
	int format = info->format;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];

	// One plane per Bayer component
	PIXEL16U *g1_plane;
	PIXEL16U *rg_plane;
	PIXEL16U *bg_plane;
	PIXEL16U *g2_plane;

	int g1_pitch;
	int rg_pitch;
	int bg_pitch;
	int g2_pitch;

#if 0
	int channel;
	for (channel = 0; channel < num_channels; channel++)
	{
		lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
		if (logfile) {
			char label[_MAX_PATH];
			char *format = decoded_format_string[info->format];
			sprintf(label, "Output, channel: %d, format: %s", channel,
			format);
			DumpImageStatistics(label, lowpass_images[channel], logfile);
		}
#endif
	}
#endif

	// Get the lowpass bands in the wavelet corresponding to the output frame
	g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0];
	rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0];
	bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0];

	// The second green channel may not have been decoded at half resolution
	if(transform_array[3]->wavelet[frame]) //half res don't decode g1-g2 //HACK
	{
		g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0];
		g2_pitch = transform_array[3]->wavelet[frame]->pitch;
	}
	else
	{
		g2_plane = NULL;
		g2_pitch = 0;
	}

	// Get the pitch of each plane
	g1_pitch = transform_array[0]->wavelet[frame]->pitch;
	rg_pitch = transform_array[1]->wavelet[frame]->pitch;
	bg_pitch = transform_array[2]->wavelet[frame]->pitch;

	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch, frame_width, frame_height);
		break;

	default:
		// No other output formats are implemented for half resolution Bayer
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data to quarter resolution
//
// Placeholder only: the conversion has not been implemented, so this routine
// asserts in debug builds and returns CODEC_ERROR_OKAY in release builds.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//FRAME_INFO *info = &decoder->frame;
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;
	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	//int format = info->format;

	//TODO: Need to finish this routine
	assert(0);

	return error;
}

// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
//
// Dispatches on the decoded resolution, progressive versus interlaced source,
// and the requested pixel format; most paths delegate to the threaded inverse
// transform routines and return directly from within the dispatch.
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error =
	CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int progressive = codec->progressive;
	int precision = codec->precision;
	TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	int resolution = info->resolution;
	int format = info->format;
	//int color_space = decoder->frame.colorspace;

	//TODO: Eliminate use of the chroma offset
	int chroma_offset = decoder->codec.chroma_offset;

#if _THREADED
	// Type of threaded inverse transform
	//int type;
#endif

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	// NOTE(review): decoder has already been dereferenced repeatedly above, so
	// this null check comes too late to protect anything -- confirm whether it
	// should be hoisted to the top of the routine.
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

	//TODO: Split this routine into subroutines for progressive versus interlaced video
	//TODO: Split progressive and interlaced routines into subroutines for each resolution

	// Half resolution decodes come straight from the lowpass bands
	if(resolution == DECODED_RESOLUTION_HALF)
	{
		bool inverted = false;
		FRAME_INFO info2;
		memcpy(&info2, info, sizeof(FRAME_INFO));
		format = info2.format;

		// RGB24 and RGB32 are stored bottom-up, so decode the inverted variant
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			info2.format = format;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			info2.format = format;
			inverted = true;
		}

#if 1
		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0)
		{
			int height = info->height;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}
#endif

		if(decoder->use_active_metadata_decoder)
		{
#if _THREADED
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc,
								 decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output;
			mailbox->pitch = pitch;
			mailbox->framenum = frame;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT;

			decoder->RGBFilterBufferPhase = 1;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

			decoder->RGBFilterBufferPhase = 0;

			return CODEC_ERROR_OKAY;
#endif
		}
		else
		{
			// Copy the lowpass bands directly to the output buffer
			// (locals below intentionally shadow the outer declarations)
			int precision = codec->precision;
			TRANSFORM **transform_array = decoder->transform;
			int channel;
			IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
			CODEC_STATE *codec = &decoder->codec;
			int num_channels = codec->num_channels;

			for (channel = 0; channel < num_channels; channel++)
			{
				lowpass_images[channel] = transform_array[channel]->wavelet[frame];
			}

			CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch,
								   &info2, chroma_offset, precision,
								   decoder->codec.encoded_format, decoder->frame.white_point);
		}

		return CODEC_ERROR_OKAY;
	}

	// Was the video source interlaced or progressive?
	if (progressive)
	{
		// The video source was progressive (the first transform was a spatial transform)
		if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			FRAME_INFO info2;
			int format;
			bool inverted = false;
			int precision = codec->precision;

			memcpy(&info2, info, sizeof(FRAME_INFO));
			format = info2.format;

			// RGB24 and RGB32 are stored bottom-up, so decode the inverted variant
			if (format == DECODED_FORMAT_RGB24) {
				format = DECODED_FORMAT_RGB24_INVERTED;
				info2.format = format;
				inverted = true;
			}
			else if (format == DECODED_FORMAT_RGB32) {
				format = DECODED_FORMAT_RGB32_INVERTED;
				info2.format = format;
				inverted = true;
			}

#if 1
			// Have the output location and pitch been inverted?
			if (inverted && pitch > 0)
			{
				int height = info->height;
				output += (height - 1) * pitch;		// Start at the bottom row
				pitch = NEG(pitch);					// Negate the pitch to go up
			}
#endif

			/*if(decoder->use_active_metadata_decoder)
			{
				switch (format & 0x7ffffff)
				{
				case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				case DECODED_FORMAT_YUYV:	// computing the active metadata.
				case DECODED_FORMAT_UYVY:
					return CODEC_ERROR_OKAY;
					break;
				}
			}*/

			// Dispatch on the output format (high bit masked off)
			switch (format & 0x7ffffff)
			{
			case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				if(decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sYUVtoRGB);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
				if(decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToYUV);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			//Handle sizes that are smaller than the interim decode buffer //DAN20081222
			case DECODED_FORMAT_CbYCrY_10bit_2_8:
				// Split the output into an upper (2-bit) and lower (8-bit) plane
				decoder->upper_plane = output;
				decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;

				// Use the address and pitch of the lower plane
				output = decoder->lower_plane;
				pitch = decoder->frame.width * 2;

				// Fall through and compute the inverse spatial transform

			case DECODED_FORMAT_CbYCrY_16bit_2_14:
			case DECODED_FORMAT_CbYCrY_16bit_10_6:
			case DECODED_FORMAT_CbYCrY_8bit:
			case DECODED_FORMAT_CbYCrY_16bit:
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_V210:
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalYUVStrip16sToYUVOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
			// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				// Fast path: decode straight to packed RGB32 when no active
				// metadata processing is required
				if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
				{
#if _THREADED
					TransformInverseSpatialThreadedYUV422ToBuffer(decoder, frame, num_channels,
																  output, pitch, &info2, chroma_offset, precision);
#elif 0
					TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels,
													output, pitch, &info2, &decoder->scratch,
													chroma_offset, precision);
#else
					TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels,
														  output, pitch, &info2, &decoder->scratch,
														  chroma_offset, precision,
														  InvertHorizontalStripYUV16sToPackedRGB32);
#endif
					return CODEC_ERROR_OKAY;
				}

#if _THREADED
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					// Decode to interim 16-bit rows, then convert to the output format
					TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
																	 output, pitch, &info2, chroma_offset, precision);
					ConvertRow16uToOutput(decoder, frame, num_channels,
										  output, pitch, &info2, chroma_offset, precision);
					return CODEC_ERROR_OKAY;
				}
#endif
				break;

			default:
				if(decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				// else Return the error code for unsupported output format
				break;
			}
		}
	}
	else
	{
		// The video source was interlaced (the first transform was a frame transform)
		if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			bool inverted = false;

			if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
			{
			//	info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
				inverted = true;
			}

#if 1
			// Have the output location and pitch been inverted?
			if (inverted && pitch > 0)
			{
				int height = info->height;
				output += (height - 1) * pitch;		// Start at the bottom row
				pitch = NEG(pitch);					// Negate the pitch to go up
			}
#endif

			// Formats whose output buffer cannot hold the interim decode go
			// through the active metadata path with a separate filter buffer
			switch (format & 0x7ffffff)
			{
			case DECODED_FORMAT_NV12:
			case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
			case DECODED_FORMAT_V210:	// only supported with use_active_metadata_decoder
				if(decoder->use_active_metadata_decoder)
				{
					// Reallocate the interim RGB filter buffer if it is too small
					int frame_size = info->width * info->height * 4;
					if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
					{
#if _ALLOCATOR
						if(decoder->RGBFilterBuffer16)
						{
							FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
							decoder->RGBFilterBuffer16 = NULL;
						}
						decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
						if(decoder->RGBFilterBuffer16)
						{
							MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
							decoder->RGBFilterBuffer16 = NULL;
						}
						decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
						assert(decoder->RGBFilterBuffer16 != NULL);
						if (! (decoder->RGBFilterBuffer16 != NULL)) {
							return CODEC_ERROR_MEMORY_ALLOC;
						}
						decoder->RGBFilterBufferSize = frame_size;
					}

					//TransformInverseSpatialUniversalThreadedToRow16u(
					//	decoder, frame, num_channels,
					//	(uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
					//	info, chroma_offset, precision);

#if _INTERLACED_WORKER_THREADS
					StartInterlaceWorkerThreads(decoder); //TODO: support new threading

					// Send the upper and lower rows of the transforms to the worker threads
					TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
														  (PIXEL16U *)decoder->RGBFilterBuffer16, info->width * 4,
														  info, chroma_offset, precision);
#else
					// Transform the wavelets for each channel to the output image (not threaded)
					TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
												  (PIXEL16U *)decoder->RGBFilterBuffer16, info->width * 4,
												  info, &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
					{
						WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
						if(decoder->worker_thread.pool.thread_count == 0)
						{
							CreateLock(&decoder->worker_thread.lock);
							// Initialize the pool of transform worker threads
							ThreadPoolCreate(&decoder->worker_thread.pool,
											 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
											 WorkerThreadProc,
											 decoder);
						}
#endif
						// Post a message to the mailbox
						mailbox->output = output;
						mailbox->pitch = pitch;
						memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
						mailbox->jobType = JOB_TYPE_OUTPUT;

						decoder->RGBFilterBufferPhase = 2; // yuv

						// Set the work count to the number of rows to process
						ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

						// Start the transform worker threads
						ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

						// Wait for all of the worker threads to finish
						ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

						decoder->RGBFilterBufferPhase = 0;
					}
#endif
					return CODEC_ERROR_OKAY;
				}
			}

			switch (format)
			{
			// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
			case DECODED_FORMAT_WP13: //DAN20110203 - missing
			case DECODED_FORMAT_W13A: //DAN20110203 - missing
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_RGB32:	//32-bit format can fit the interim YR16 decode into
			case DECODED_FORMAT_R408:	//the output buffer
			case DECODED_FORMAT_V408:
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
				StartInterlaceWorkerThreads(decoder); //TODO: support new threading

				// Send the upper and lower rows of the transforms to the worker threads
				TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
													  (PIXEL16U *)output, pitch,
													  info, chroma_offset, precision);

				ConvertRow16uToOutput(decoder, frame, num_channels,
									  output, pitch, info, chroma_offset, precision);
#else
				// Transform the wavelets for each channel to the output image (not threaded)
				TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
											  (PIXEL16U *)output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);

				ConvertRow16uToOutput(decoder, frame, num_channels,
									  output, pitch, info, chroma_offset, precision);

				//Old code converts 4:2:2 directly to RGBA (single threaded.)
				//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
				//							  info, &decoder->scratch, chroma_offset, precision);
#endif
				return CODEC_ERROR_OKAY;

			default:
				// else Return the error code for unsupported output format
				break;
			}
		}
	}

	// The output format is not supported by this routine
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	return error;
}

// Routines for converting the new encoded formats to the requested output format

// Reconstruct an RGB 4:4:4 encoded sample to the requested output format
//
// Handles lowpass-only, quarter, half, and full/half-horizontal resolutions
// for progressive frames; dispatches to the threaded transforms or the
// non-threaded fallbacks depending on the build and the output format.
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	//IMAGE *wavelet;
	//int wavelet_width;
	//int wavelet_height;
	int decoded_width = 0;
	int decoded_height = 0;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	//int decoded_scale;

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	//TODO: Eliminate use of the chroma offset

	// NOTE(review): decoder was already dereferenced above, so this null check
	// comes too late to protect anything -- confirm whether it can be hoisted.
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

	// This routine should only be called for progressive frames
	assert(codec->progressive);

	// The decoder can decode a video sample without returning a frame
	if (output == NULL || pitch == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Does this frame have to be reconstructed?
	if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Check that the requested frame is within the limits of the group of frames
	assert(0 <= frame && frame < decoder->gop_length);

	// Check that the frame resolution is valid
	assert(IsValidFrameResolution(resolution));
	if (!IsValidFrameResolution(resolution)) {
		return CODEC_ERROR_RESOLUTION;
	}

	// Compute the decoded width and height
	ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
	assert(decoded_width > 0 && decoded_height > 0);

	// RGB24 and RGB32 are stored bottom-up: start at the last row and negate the pitch
	if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
	{
		output += (info->height-1)*pitch;
		pitch = -pitch;
	}

#if (0 && DEBUG)
	if (logfile) {
		IMAGE *wavelet = transform[0]->wavelet[frame];
		int band = 0;
		fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
		DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
	}
#endif

	// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
	//if (!
	(info->width >= decoded_width)) {
		if (logfile) {
			//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
			fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
		}
	}
#endif
	assert(info->width >= decoded_width);
	if (!(info->width >= decoded_width)) {
		return CODEC_ERROR_FRAMESIZE;
	}

//	assert((info->height+7)/8 >= (decoded_height+7)/8);
//	if (!(info->height+7)/8 >= (decoded_height+7)/8) {
//		return CODEC_ERROR_FRAMESIZE;
//	}

	START(tk_convert);

	if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
	{
		//int precision = codec->precision;
		int scale = 13;
		int channel;
		IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
		int chroma_offset = decoder->codec.chroma_offset;

		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		for (channel = 0; channel < num_channels; channel++)
		{
			lowpass_images[channel] = transform_array[channel]->wavelet[5];
			if(lowpass_images[channel] == NULL) // therefore intra-frame compressed: use wavelet 2 at a smaller scale
			{
				scale = 12;
				lowpass_images[channel] = transform_array[channel]->wavelet[2];
			}
		}

		CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch,
							   info, chroma_offset, scale,
							   decoder->codec.encoded_format, decoder->frame.white_point);
	}
	else
	// Quarter resolution
	if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Output quarter resolution for the two frame GOP
		int precision = codec->precision;

		// Reconstruct the frame to quarter resolution
		ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
								info, &decoder->scratch, precision);

		// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
	}
	else
	// Half resolution
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
		int precision = codec->precision;
		int chroma_offset = 0;
		int channel;

		if(decoder->use_active_metadata_decoder)
		{
#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				mailbox->framenum = frame;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Get the
			// first level wavelet in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				wavelet_array[channel] = transform_array[channel]->wavelet[frame];
			}

			// Pack the pixels from the lowpass band in each channel into the output buffer
			CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
									  info, chroma_offset, precision);
		}
	}
	// Full resolution or half horizontal
	else
	{
		int chroma_offset = 0;
		int precision = codec->precision;

		// Reconstruct the output frame from a full resolution decode
		//assert(resolution == DECODED_RESOLUTION_FULL);

		if(decoder->use_active_metadata_decoder)
		{
			int frame_size, channels = 3;

			// Four channels are needed when decoding RGBA with alpha output
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				channels = 4;

			frame_size = info->width * info->height * channels * 2;

			// Reallocate the interim RGB filter buffer if it is too small
			if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
			{
#if _ALLOCATOR
				if(decoder->RGBFilterBuffer16)
				{
					FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
				if(decoder->RGBFilterBuffer16)
				{
					MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
				assert(decoder->RGBFilterBuffer16 != NULL);
				if (! (decoder->RGBFilterBuffer16 != NULL)) {
					return CODEC_ERROR_MEMORY_ALLOC;
				}
				decoder->RGBFilterBufferSize = frame_size;
			}

#if _THREADED
			TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
															 (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
															 info, chroma_offset, precision);
#else
			// Decode that last transform to rows of Bayer data (one row per channel)
			TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
											(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
											info, &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Dispatch on the output pixel format
			switch (info->format)
			{
			case DECODED_FORMAT_B64A:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2B64A);
#else
				TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YU64:
				//TODO : Threading
				TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded.  WIP
				TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2RG30);
#else
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YUV);
#else
				TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels,
													  output, pitch, info, &decoder->scratch,
													  chroma_offset, precision,
													  InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
				break;

			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGBA2YUVA);
#else
				assert(0);
#endif
				break;

			case DECODED_FORMAT_YR16:
#if _THREADED
// (continuation of the decoded-format dispatch switch of the enclosing function)
	TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
		output, pitch, info, chroma_offset, precision,
		InvertHorizontalStrip16sRGB2YR16);
#else
	assert(0);// missing non-threaded version
#endif
	break;

	case DECODED_FORMAT_V210:
#if _THREADED
	TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
		output, pitch, info, chroma_offset, precision,
		InvertHorizontalStrip16sRGB2v210);
#else
	assert(0);// missing non-threaded version
#endif
	break;

	case DECODED_FORMAT_CbYCrY_8bit: // DECODED_FORMAT_CT_UCHAR
#if _THREADED
	TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
		output, pitch, info, chroma_offset, precision,
		InvertHorizontalStrip16sRGB2YUV);
#else
	assert(0);// missing non-threaded version
#endif
	break;

	//TODO: Add code to handle other Avid pixel formats
	case DECODED_FORMAT_CbYCrY_16bit: // DECODED_FORMAT_CT_SHORT
	case DECODED_FORMAT_CbYCrY_10bit_2_8: // DECODED_FORMAT_CT_10Bit_2_8
	case DECODED_FORMAT_CbYCrY_16bit_2_14: // DECODED_FORMAT_CT_SHORT_2_14
	case DECODED_FORMAT_CbYCrY_16bit_10_6: // DECODED_FORMAT_CT_USHORT_10_6
	assert(0);
	break;

	default:
#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Invalid decoded format: %d\n", info->format);
	}
#endif
	assert(0);
	error = CODEC_ERROR_INVALID_FORMAT;
	break;
	}
	}

	STOP(tk_convert);

	return error;
}

// Convert 16-bit signed lowpass data into the requested output format
// NOTE(review): image_array holds the lowpass planes with G at index 0,
// R at index 1, B at index 2 (and A at index 3) -- see the band[] accesses
// in the YUYV/UYVY and default cases below.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
							   uint8_t *output_buffer, int32_t output_pitch,
							   FRAME_INFO *info, int chroma_offset, int precision)
{
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int format = info->format;

	// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
	const int shift = 16 - precision - PRESCALE_LUMA;

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		inverted = true;
		// fall through: non-inverted formats set the flag, then share the RGB path below

	case DECODED_FORMAT_RGB24_INVERTED:
	case DECODED_FORMAT_RGB32_INVERTED:
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_RG64: //WIP
		ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
								  output_pitch, format, inverted, shift, num_channels);
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
	{
		IMAGE *g_image = image_array[0];
		IMAGE *r_image = image_array[1];
		IMAGE *b_image = image_array[2];

		// NOTE(review): a DECODED_FORMAT_* value is compared with COLOR_FORMAT_*
		// constants -- presumably the enums share values; verify.
		if (info->format == COLOR_FORMAT_YUYV)
		{
			ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
						   r_image->pitch, g_image->pitch, b_image->pitch,
						   output_buffer, output_pitch,
						   output_width, output_height, 14, info->colorspace, info->format);
		}
		else if (info->format == COLOR_FORMAT_UYVY)
		{
			ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
							r_image->pitch, g_image->pitch, b_image->pitch,
							output_buffer, output_pitch,
							output_width, output_height, 14, info->colorspace, info->format);
		}
	}
	break;

	default:
	{
		// Generic path: repack one scanline at a time into planar R,G,B(,A)
		// order in scratch memory, then convert through the metadata pipeline.
		int y;
		IMAGE *g_image = image_array[0];
		IMAGE *r_image = image_array[1];
		IMAGE *b_image = image_array[2];
		IMAGE *a_image = image_array[3];
		unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
		//unsigned short *scanline2 = scanline + output_width*3;
		uint8_t *newline = (uint8_t *)output_buffer;
		unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

		Rptr = (unsigned short *)r_image->band[0];
		Gptr = (unsigned short *)g_image->band[0];
		Bptr = (unsigned short *)b_image->band[0];

		if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 &&
			ALPHAOUTPUT(decoder->frame.format))
		{
			// Four-plane (RGBA) variant
			Aptr = (unsigned short *)a_image->band[0];
			for(y=0; y<output_height; y++)
			{
				int flags = (ACTIVEMETADATA_PLANAR);
				int whitebitdepth = 14;

				// Pack R,G,B,A planes back-to-back in the scratch scanline
				memcpy(scanline, Rptr, info->width*2);
				memcpy(scanline+info->width, Gptr, info->width*2);
				memcpy(scanline+info->width*2, Bptr, info->width*2);
				memcpy(scanline+info->width*3, Aptr, info->width*2);

				// pitches are in bytes; the pointers step in 16-bit units
				Rptr += r_image->pitch/2;
				Gptr += g_image->pitch/2;
				Bptr += b_image->pitch/2;
				Aptr += a_image->pitch/2;

				Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
										 newline, output_pitch, info->format,
										 whitebitdepth, flags);
				newline += output_pitch;
			}
		}
		else
		{
			// Three-plane (RGB) variant
			for(y=0; y<output_height; y++)
			{
				int flags = (ACTIVEMETADATA_PLANAR);
				int whitebitdepth = 14;

				memcpy(scanline, Rptr, info->width*2);
				memcpy(scanline+info->width, Gptr, info->width*2);
				memcpy(scanline+info->width*2, Bptr, info->width*2);

				Rptr += r_image->pitch/2;
				Gptr += g_image->pitch/2;
				Bptr += b_image->pitch/2;

				ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
									 newline, output_pitch, info->format,
									 whitebitdepth, flags);
				newline += output_pitch;
			}
		}
	}
	//assert(0);
	break;
	}

	STOP(tk_convert);
}

#if _THREADED

// Threaded inverse transform using the new threads API
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
												   uint8_t *output, int pitch, FRAME_INFO *info,
												   int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//TODO: Add support for more output formats
	int format = DECODED_FORMAT_RGB32;

	// The upper and lower spatial transforms only share the middle rows
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
	HorizontalInverseFilterOutputProc horizontal_filter_proc;

#if _DELAY_THREAD_START
	// Lazily create the worker pool the first time a threaded transform runs
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Choose the correct inverse horizontal filter for the output format
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		horizontal_filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
		break;

	default:
		assert(0);
		return;
	}

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "All worker threads signalled done\n");
	}
#endif
}

// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
													  uint8_t *output, int pitch, FRAME_INFO *info,
													  int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	// The upper and lower spatial transforms only share the middle rows
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
	// (fixed here: always the 16-bit planar row writer)
	HorizontalInverseFilterOutputProc horizontal_filter_proc;
	horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar;

#if _DELAY_THREAD_START
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}

// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
// The caller supplies the horizontal filter, so this routine handles any
// output format for which a filter exists.
void TransformInverseSpatialUniversalThreadedToOutput(
	DECODER *decoder,
	int frame_index,
	int num_channels,
	uint8_t *output,
	int pitch,
	FRAME_INFO *info,
	int chroma_offset,
	int precision,
	HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	// The upper and lower spatial transforms only share the middle rows
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
#if _DELAY_THREAD_START
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}

// Routines for the worker threads that use the new threads API

// Per-thread worker body: inverts one section of the spatial transform and
// writes finished output rows directly in the requested format.
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index, int frame_index, int num_channels,
											uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
											int chroma_offset, int precision,
											HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Wavelet band pointers and pitches, gathered per channel below
	PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
	PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
	PIXEL *highlow_band[CODEC_MAX_CHANNELS];
	PIXEL *highhigh_band[CODEC_MAX_CHANNELS];

	int lowlow_pitch[CODEC_MAX_CHANNELS];
	int lowhigh_pitch[CODEC_MAX_CHANNELS];
	int highlow_pitch[CODEC_MAX_CHANNELS];
	int highhigh_pitch[CODEC_MAX_CHANNELS];

	int channel_width[CODEC_MAX_CHANNELS];

	uint8_t *output_row_ptr;

	uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
	int plane_pitch[TRANSFORM_MAX_CHANNELS];

	int output_width = info->width;
	int output_height = info->height;
	int half_height = output_height/2;

	int luma_band_width;

	ROI strip;
	char *bufptr;

	int last_row;
	int last_display_row;
	int last_line;

	int channel;
	int row;
	int odd_display_lines = 0;
THREAD_ERROR error;

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	//TODO: Replace uses of buffer variables with calls to the scratch space API

	// This version is for 16-bit pixels
	assert(sizeof(PIXEL) == 2);

	// Must have a valid inverse horizontal filter
	assert(horizontal_filter_proc != NULL);

	// Check for enough space in the local array allocations
	// assert(num_channels <= CODEC_NUM_CHANNELS);
	assert(num_channels <= TRANSFORM_MAX_CHANNELS);

	// Divide the buffer space between the four threads
	buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
	buffer += buffer_size * thread_index;

	// Round the buffer pointer up to the next cache line
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
	bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);

	// Allocate buffer space for the output rows from each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the row width for this channel
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
		int width = wavelet->width;
		int height = wavelet->height;
		//int pitch = wavelet->pitch;
		size_t channel_buffer_size;

		// Compute the width and pitch for the output rows stored in this buffer
		// (each wavelet row reconstructs two output rows at twice the width)
		int buffer_width = 2 * width;
		int buffer_height = 2;
		int buffer_pitch = ALIGN16(buffer_width);

		// Compute the total allocation for this channel
		channel_buffer_size = buffer_height * buffer_pitch;

		// Check that there is enough space available
		assert(channel_buffer_size <= buffer_size);

		// Allocate the buffer for this channel
		plane_array[channel] = (uint8_t *)bufptr;

		// Remember the pitch for rows in this channel
		plane_pitch[channel] = buffer_pitch;

		// Advance the buffer pointer past the allocated space for this channel
		bufptr += channel_buffer_size;

		// Reduce the amount of space remaining in the buffer
		buffer_size -= channel_buffer_size;

		// The dimensions of the output image are the same as the luma channel
		// (last_row/last_display_row/odd_display_lines are set from channel 0 only)
		if (channel == 0)
		{
			strip.width = buffer_width;
			strip.height = buffer_height;

			last_row = height; //DAN20050606 Added to fix issue with non-div by 8 heihts.
			last_display_row = (info->height+1)/2;

			// DAN20090215 -- fix for odd display lines.
			odd_display_lines = info->height & 1;

			// Remember the width of the wavelet bands for luma
			luma_band_width = width;
		}

		// Save the bands per channel for routines that process all channels at once
		lowlow_band[channel] = wavelet->band[0];
		lowhigh_band[channel] = wavelet->band[1];
		highlow_band[channel] = wavelet->band[2];
		highhigh_band[channel] = wavelet->band[3];

		lowlow_pitch[channel] = wavelet->pitch;
		lowhigh_pitch[channel] = wavelet->pitch;
		highlow_pitch[channel] = wavelet->pitch;
		highhigh_pitch[channel] = wavelet->pitch;

		// Remember the width of the wavelet for this channel
		channel_width[channel] = width;
	}

	// Use the remaining buffer space for intermediate results
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
	buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);

	if (last_row == last_display_row) {
		last_line = half_height - 1;
	}
	else {
		last_line = half_height;
	}

	if(odd_display_lines)
		last_line++;

	if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
	{
		// Process the first row
		row = 0;
		output_row_ptr = output_buffer;

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
		}
#endif
		// Process the first row using special border filters for the top row
		InvertSpatialTopRow16sToOutput(decoder, thread_index,
									   lowlow_band, lowlow_pitch,
									   lowhigh_band, lowhigh_pitch,
									   highlow_band, highlow_pitch,
									   highhigh_band, highhigh_pitch,
									   output_row_ptr, output_pitch,
									   output_width, info->format, info->colorspace,
									   row, channel_width,
									   (PIXEL *)buffer, buffer_size, precision,
									   horizontal_filter_proc);
	}

	if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD ||
		decoder->worker_thread.pool.thread_count == 1)
	{
		if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
		{
			int pitch = output_pitch;

			// Process the last row
			row = last_row - 1;

			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
					pitch >>= 1;

			// Begin filling the last output row with results
			output_row_ptr = output_buffer + row * 2 * pitch;

#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// Process the last row using special border filters for the bottom row
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
				   decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
					output_row_ptr -= output_pitch;

			InvertSpatialBottomRow16sToOutput(decoder, thread_index,
											  lowlow_band, lowlow_pitch,
											  lowhigh_band, lowhigh_pitch,
											  highlow_band, highlow_pitch,
											  highhigh_band, highhigh_pitch,
											  output_row_ptr, output_pitch,
											  output_width, info->format, info->colorspace,
											  row, channel_width,
											  (PIXEL *)buffer, buffer_size, precision,
											  odd_display_lines,
											  horizontal_filter_proc);
		}
	}

	// Loop until all of the middle rows have been processed
	for (;;)
	{
		int work_index;
		int row;

		// Wait for one row from each channel to process
		error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);

		// Is there another row to process?
		if (error == THREAD_ERROR_OKAY)
		{
			int pitch = output_pitch;

			// Compute the next row to process from the work index
			// (row 0 is the top border row handled above, so offset by one)
			row = work_index + 1;

			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
					pitch >>= 1;

			// Compute the output row corresponding to this row index
			output_row_ptr = output_buffer + row * 2 * pitch;
		}
		else
		{
			// No more work to do
			return;
		}

		// Is the row inside the top and bottom border?
if (0 < row && row < last_line)
		{
			int outputlines = 2;
#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// An odd display height means the final pair only emits one line
			if(odd_display_lines && row==last_line-1)
			{
				outputlines = 1;
			}
			// Process the middle row using the normal wavelet filters
			InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
											  lowlow_band, lowlow_pitch,
											  lowhigh_band, lowhigh_pitch,
											  highlow_band, highlow_pitch,
											  highhigh_band, highhigh_pitch,
											  output_row_ptr, output_pitch,
											  output_width, info->format, info->colorspace,
											  row, channel_width,
											  (PIXEL *)buffer, buffer_size, precision,
											  horizontal_filter_proc, outputlines);
		}
	}
}

#endif //_THREADED

// Scan the tag/value tuples in a sample for the first occurrence of findtag.
// Returns true and stores the tag's value through retvalue on success.
// NOTE(review): near-duplicate of GetTupletAddr below -- keep the two loops
// in sync if either is changed.
bool GetTuplet(unsigned char *data, int datasize, unsigned short findtag, unsigned short *retvalue)
{
	bool ret = false;
	BITSTREAM myinput, *pinput;
	TAGVALUE segment;
	TAGWORD tag,value;
	int error = 0;
	//char t[100];

	InitBitstream(&myinput);
	myinput.lpCurrentWord = data;
	myinput.nWordsUsed = datasize;
	pinput = &myinput;

	do
	{
		bool optional = false;   // computed but not otherwise used below
		int chunksize = 0;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(pinput);
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Is this an optional tag?
		if (tag < 0) {
			tag = NEG(tag);
			optional = true;
		}

		// Decode the chunk size encoded in the tag/value pair
		if(tag & 0x2000)
		{
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);
		}
		else if(tag & 0x4000)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else if(tag == CODEC_TAG_INDEX)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else
		{
			chunksize = 0;
		}

		if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
		{
			int skip = 1;
			error = 0;

			if(tag == (int)findtag)
			{
				*retvalue = value;
				ret = true;
				break;
			}

			if((tag & 0xff00) == 0x2200) //sample size
			{
				chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
				skip = 0;
			}
			if((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip = 1;
			}
			if((tag & 0xff00) == 0x2100) //level
				skip = 0;

			if(chunksize)
			{
				// chunksize is in 32-bit words; bail out if it overruns the sample
				if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
				{
					break;
				}
				if(skip)
				{
					//unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
					pinput->lpCurrentWord += chunksize*4;
					pinput->nWordsUsed -= chunksize*4;
				}
			}
		}
		else
		{
			error = 1;
		}
	} while(tag != CODEC_TAG_GROUP_TRAILER &&
			tag != CODEC_TAG_FRAME_TRAILER &&
			pinput->nWordsUsed>0 &&
			!error);

	return ret;
}

/*!
	Copied from metadata.cpp in the cedoc common directory

	Same scan as GetTuplet, but returns the bitstream address immediately
	after the matched tuple (NULL when not found) and stores the value
	through retvalue.
*/
uint8_t *GetTupletAddr(uint8_t *data, int datasize, uint16_t findtag, int16_t *retvalue)
{
	unsigned char *ret = NULL;
	BITSTREAM myinput, *pinput;
	TAGVALUE segment;
	TAGWORD tag,value;
	int error = 0;

	if (data == NULL || datasize == 0) {
		return NULL;
	}

	//InitBitstream(&myinput);
	memset(&myinput, 0, sizeof(BITSTREAM));
	myinput.lpCurrentWord = data;
	myinput.nWordsUsed = datasize;
	myinput.nBitsFree = BITSTREAM_LONG_SIZE;
	pinput = &myinput;

	do
	{
		//BOOL optional = FALSE;
		bool optional = false;   // computed but not otherwise used below
		int chunksize = 0;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(pinput);
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Is this an optional tag?
		if (tag < 0) {
			tag = NEG(tag);
			//optional = TRUE;
			optional = true;
		}

		// Decode the chunk size encoded in the tag/value pair
		if(tag & 0x2000)
		{
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);
		}
		else if(tag & 0x4000)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else if(tag == CODEC_TAG_INDEX)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else
		{
			chunksize = 0;
		}

		if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
		{
			int skip = 1;
			error = 0;

			if(tag == (int)findtag)
			{
				*retvalue = value;
				ret = pinput->lpCurrentWord;
				break;
			}

			if((tag & 0xff00) == 0x2200) //sample size
			{
				chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
				skip = 0;
			}
			if((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip = 1;
			}
			if((tag & 0xff00) == 0x2100) //level
				skip = 0;

			if(chunksize)
			{
				// chunksize is in 32-bit words; bail out if it overruns the sample
				if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
				{
					break;
				}
				if(skip)
				{
					//unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
					pinput->lpCurrentWord += chunksize*4;
					pinput->nWordsUsed -= chunksize*4;
				}
			}
		}
		else
		{
			error = 1;
		}
	} while(tag != CODEC_TAG_GROUP_TRAILER &&
			tag != CODEC_TAG_FRAME_TRAILER &&
			pinput->nWordsUsed>0 &&
			!error);

	return ret;
}
stepper.c
#include "stepper.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <omp.h>
#include <stdio.h>

#define BLOCK_SIZE 4

//ldoc on
/**
 * ## Implementation
 *
 * ### Structure allocation
 */

/**
 * Allocate and initialize a simulation state for an nx-by-ny grid covering
 * a w-by-h physical domain with `nfield` solution fields.
 *
 * The u/v/f/g field arrays and the per-row scratch space are carved out of
 * a single allocation anchored at sim->u.  Returns NULL if either
 * allocation fails (the original dereferenced the malloc results
 * unchecked, which is undefined behavior on allocation failure).
 */
central2d_t* central2d_init(float w, float h, int nx, int ny,
                            int nfield, flux_t flux, speed_t speed,
                            float cfl)
{
    // We extend to a four cell buffer to avoid BC comm on odd time steps
    int ng = 4;

    central2d_t* sim = malloc(sizeof(central2d_t));
    if (sim == NULL)
        return NULL;
    sim->nx = nx;
    sim->ny = ny;
    sim->ng = ng;
    sim->nfield = nfield;
    sim->dx = w/nx;
    sim->dy = h/ny;
    sim->flux = flux;
    sim->speed = speed;
    sim->cfl = cfl;

    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int nc = nx_all * ny_all;
    int N  = nfield * nc;
    // One block holds u, v, f, g (N floats each) plus 6 scratch rows
    sim->u  = malloc((4*N + 6*nx_all)* sizeof(float));
    if (sim->u == NULL) {
        free(sim);
        return NULL;
    }
    sim->v  = sim->u +   N;
    sim->f  = sim->u + 2*N;
    sim->g  = sim->u + 3*N;
    sim->scratch = sim->u + 4*N;

    return sim;
}

/**
 * Release a simulation state created by central2d_init.
 * sim->u owns the single backing allocation for all field arrays.
 */
void central2d_free(central2d_t* sim)
{
    free(sim->u);
    free(sim);
}

/**
 * Linear index of cell (ix,iy) of field k in the padded storage,
 * where (0,0) is the first interior (non-ghost) cell.
 */
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
    int nx = sim->nx, ny = sim->ny, ng = sim->ng;
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    return (k*ny_all+(ng+iy))*nx_all+(ng+ix);
}

/**
 * ### Boundary conditions
 *
 * In finite volume methods, boundary conditions are typically applied by
 * setting appropriate values in ghost cells.  For our framework, we will
 * apply periodic boundary conditions; that is, waves that exit one side
 * of the domain will enter from the other side.
 *
 * We apply the conditions by assuming that the cells with coordinates
 * `nghost <= ix <= nx+nghost` and `nghost <= iy <= ny+nghost` are
 * "canonical", and setting the values for all other cells `(ix,iy)`
 * to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
 * integers `p` and `q`
*/

// Copy an nx-by-ny sub-block between two strided arrays.
static inline
void copy_subgrid(float* restrict dst,
                  const float* restrict src,
                  int nx, int ny, int stride1, int stride2)
{
    for (int iy = 0; iy < ny; ++iy)
        for (int ix = 0; ix < nx; ++ix)
            dst[iy*stride1+ix] = src[iy*stride2+ix];
}

// Copy an nx-by-ny sub-block for every field; c1/c2 are the per-field
// element counts (field strides) of dst and src respectively.
static inline
void copy_subgrid_allfield(float* restrict dst,
                           const float* restrict src,
                           int nx, int ny, int c1, int c2,
                           int stride1, int stride2, int nfield)
{
    for (int k = 0; k < nfield; ++k) {
        copy_subgrid(dst+k*c1, src+k*c2, nx, ny, stride1, stride2);
    }
}

// Debug helper: dump an nx-by-ny strided grid to stdout.
static inline
void print_grid(const float* restrict u, int nx, int ny, int stride)
{
    for (int ix = 0; ix < nx; ++ix) {
        for (int iy = 0; iy < ny; ++iy) {
            printf("%.2f ", u[iy*stride+ix]);
        }
        printf("\n");
    }
    printf("\n");
}

// Fill the ghost cells of the full (undecomposed) grid with periodic copies
// of the opposite edges.
void central2d_periodic_full(float* restrict u,
                             int nx, int ny, int ng, int nfield)
{
    // Stride and number per field
    int s = nx + 2*ng;
    int field_stride = (ny+2*ng)*s;

    // Offsets of left, right, top, and bottom data blocks and ghost blocks
    int l = nx,   lg = 0;
    int r = ng,   rg = nx+ng;
    int b = ny*s, bg = 0;
    int t = ng*s, tg = (nx+ng)*s;

    // Copy data into ghost cells on each side
    for (int k = 0; k < nfield; ++k) {
        float* uk = u + k*field_stride;
        copy_subgrid(uk+lg, uk+l, ng, ny+2*ng, s, s);
        copy_subgrid(uk+rg, uk+r, ng, ny+2*ng, s, s);
        copy_subgrid(uk+tg, uk+t, nx+2*ng, ng, s, s);
        copy_subgrid(uk+bg, uk+b, nx+2*ng, ng, s, s);
    }
}

// Gather one partx-by-party subdomain (px,py), plus ng*tbatch ghost cells on
// every side, from the global array src into the per-thread array u.
// Periodic wrap is handled via the modxl/modxr/modyb/modyt neighbor indices;
// the nine copy_subgrid calls cover the 3x3 tiling of corner/edge/center
// blocks of the padded subdomain.
void central2d_periodic(float* restrict u,
                        const float* restrict src,
                        int nx, int ny, int ng, int partx, int party,
                        int px, int py, int nfield, int tbatch)
{
    // Stride and number per field
    int ngu = ng*tbatch;          // ghost width of the subdomain copy
    int backng = ng*(tbatch-1);   // extra lookback beyond the global ghost ring
    int s = nx + 2*ngu;
    int s2 = nx*partx + 2*ng;
    int field_stride = (ny+2*ngu)*s;
    int field_stride2 = (ny*party+2*ng)*s2;

    // Copy data into ghost cells on each side
    for (int k = 0; k < nfield; ++k) {
        float* uk = u + k*field_stride;
        const float* srck = src + k*field_stride2;
        // Periodic neighbor tile indices (wrap at the domain edges)
        int modxl = (px == 0? partx : px);
        int modxr = (px == partx-1? 0 : px+1);
        int modyb = (py == 0? party : py);
        int modyt = (py == party-1? 0 : py+1);
        // Left column: bottom corner, middle edge, top corner
        copy_subgrid(uk,
                     srck+(modyb*ny-backng)*s2+(modxl*nx-backng),
                     ngu, ngu, s, s2);
        copy_subgrid(uk+ngu*s,
                     srck+(ng+py*ny)*s2+modxl*nx-backng,
                     ngu, ny, s, s2);
        copy_subgrid(uk+(ngu+ny)*s,
                     srck+(ng+modyt*ny)*s2+modxl*nx-backng,
                     ngu, ngu, s, s2);
        // Center column: bottom edge, interior, top edge
        copy_subgrid(uk+ngu,
                     srck+(modyb*ny-backng)*s2+px*nx+ng,
                     nx, ngu, s, s2);
        copy_subgrid(uk+ngu*s+ngu,
                     srck+(ng+py*ny)*s2+px*nx+ng,
                     nx, ny, s, s2);
        copy_subgrid(uk+(ngu+ny)*s+ngu,
                     srck+(ng+modyt*ny)*s2+px*nx+ng,
                     nx, ngu, s, s2);
        // Right column: bottom corner, middle edge, top corner
        copy_subgrid(uk+ngu+nx,
                     srck+(modyb*ny-backng)*s2+modxr*nx+ng,
                     ngu, ngu, s, s2);
        copy_subgrid(uk+ngu*s+nx+ngu,
                     srck+(ng+py*ny)*s2+modxr*nx+ng,
                     ngu, ny, s, s2);
        copy_subgrid(uk+(ngu+ny)*s+ngu+nx,
                     srck+(ng+modyt*ny)*s2+modxr*nx+ng,
                     ngu, ngu, s, s2);
    }
}

/**
 * ### Derivatives with limiters
 *
 * In order to advance the time step, we also need to estimate
 * derivatives of the fluxes and the solution values at each cell.
 * In order to maintain stability, we apply a limiter here.
 *
 * The minmod limiter *looks* like it should be expensive to computer,
 * since superficially it seems to require a number of branches.
 * We do something a little tricky, getting rid of the condition
 * on the sign of the arguments using the `copysign` instruction.
 * If the compiler does the "right" thing with `max` and `min`
 * for floating point arguments (translating them to branch-free
 * intrinsic operations), this implementation should be relatively fast.
 */

// Branch-free computation of minmod of two numbers times 2s
static inline
float xmin2s(float s, float a, float b) {
    // sa+sb is +/-2s when a and b share a sign, 0 otherwise
    float sa = copysignf(s, a);
    float sb = copysignf(s, b);
    float abs_a = fabsf(a);
    float abs_b = fabsf(b);
    float min_abs = (abs_a < abs_b ? abs_a : abs_b);
    return (sa+sb) * min_abs;
}

// Limited combined slope estimate
static inline
float limdiff(float um, float u0, float up) {
    const float theta = 2.0;
    const float quarter = 0.25;
    float du1 = u0-um;   // Difference to left
    float du2 = up-u0;   // Difference to right
    float duc = up-um;   // Twice centered difference
    return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}

// Compute limited derivs
// NOTE(review): reads u[-1]..u[ncell]; the caller must supply a pointer with
// one valid cell on each side.
static inline
void limited_deriv1(float* restrict du,
                    const float* restrict u,
                    int ncell)
{
    for (int i = 0; i < ncell; ++i)
        du[i] = limdiff(u[i-1], u[i], u[i+1]);
}

// Compute limited derivs across stride
static inline
void limited_derivk(float* restrict du,
                    const float* restrict u,
                    int ncell, int stride)
{
    assert(stride > 0);
    for (int i = 0; i < ncell; ++i)
        du[i] = limdiff(u[i-stride], u[i], u[i+stride]);
}

/**
 * ### Advancing a time step
 *
 * Take one step of the numerical scheme.  This consists of two pieces:
 * a first-order corrector computed at a half time step, which is used
 * to obtain new $F$ and $G$ values; and a corrector step that computes
 * the solution at the full step.  For full details, we refer to the
 * [Jiang and Tadmor paper][jt].
 *
 * The `compute_step` function takes two arguments: the `io` flag
 * which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
 * flag, which actually determines the time step length.  We need
 * to know the even-vs-odd distinction because the Jiang-Tadmor
 * scheme alternates between a primary grid (on even steps) and a
 * staggered grid (on odd steps).  This means that the data at $(i,j)$
 * in an even step and the data at $(i,j)$ in an odd step represent
 * values at different locations in space, offset by half a space step
 * in each direction.  Every other step, we shift things back by one
 * mesh cell in each direction, essentially resetting to the primary
 * indexing scheme.
*
 * We're slightly tricky in the corrector in that we write
 * $$
 *   v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
 * $$
 * where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
 * update formula, and $d(i,j)$ the $y$-derivative terms.  This cuts
 * the arithmetic cost a little (not that it's that big to start).
 * It also makes it more obvious that we only need four rows worth
 * of scratch space.
 */

// Predictor half-step
static
void central2d_predict(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int nx, int ny, int nfield)
{
    // scratch layout: fx (x-slopes of f) then gy (y-slopes of g), nx each
    float* restrict fx = scratch;
    float* restrict gy = scratch+nx;
    for (int k = 0; k < nfield; ++k) {
        for (int iy = 1; iy < ny-1; ++iy) {
            int offset = (k*ny+iy)*nx+1;
            limited_deriv1(fx+1, f+offset, nx-2);
            limited_derivk(gy+1, g+offset, nx-2, nx);
            for (int ix = 1; ix < nx-1; ++ix) {
                int offset = (k*ny+iy)*nx+ix;
                v[offset] = u[offset] - dtcdx2 * fx[ix] - dtcdy2 * gy[ix];
            }
        }
    }
}

// Corrector
// Computes one row of the s (u + x-derivative + f-flux) and
// d (y-derivative + g-flux) partial sums used by central2d_correct.
static
void central2d_correct_sd(float* restrict s,
                          float* restrict d,
                          const float* restrict ux,
                          const float* restrict uy,
                          const float* restrict u,
                          const float* restrict f,
                          const float* restrict g,
                          float dtcdx2, float dtcdy2,
                          int xlo, int xhi)
{
    for (int ix = xlo; ix < xhi; ++ix)
        s[ix] =
            0.2500f * (u [ix] + u [ix+1]) +
            0.0625f * (ux[ix] - ux[ix+1]) +
            dtcdx2  * (f [ix] - f [ix+1]);
    for (int ix = xlo; ix < xhi; ++ix)
        d[ix] =
            0.0625f * (uy[ix] + uy[ix+1]) +
            dtcdy2  * (g [ix] + g [ix+1]);
}

// Corrector
// Rolls two s/d row buffers (s0/d0 = previous row, s1/d1 = current row)
// down the grid so only six scratch rows are needed in total.
static
void central2d_correct(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int xlo, int xhi, int ylo, int yhi,
                       int nx, int ny, int nfield)
{
    assert(0 <= xlo && xlo < xhi && xhi <= nx);
    assert(0 <= ylo && ylo < yhi && yhi <= ny);

    float* restrict ux = scratch;
    float* restrict uy = scratch +   nx;
    float* restrict s0 = scratch + 2*nx;
    float* restrict d0 = scratch + 3*nx;
    float* restrict s1 = scratch + 4*nx;
    float* restrict d1 = scratch + 5*nx;

    for (int k = 0; k < nfield; ++k) {

        float*       restrict vk = v + k*ny*nx;
        const float* restrict uk = u + k*ny*nx;
        const float* restrict fk = f + k*ny*nx;
        const float* restrict gk = g + k*ny*nx;

        // Prime the pipeline with row ylo
        limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
        limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
        central2d_correct_sd(s1, d1, ux, uy,
                             uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
                             dtcdx2, dtcdy2, xlo, xhi);

        for (int iy = ylo; iy < yhi; ++iy) {

            // Rotate buffers: current row becomes previous row
            float* tmp;
            tmp = s0; s0 = s1; s1 = tmp;
            tmp = d0; d0 = d1; d1 = tmp;

            limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
            limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
            central2d_correct_sd(s1, d1, ux, uy,
                                 uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
                                 dtcdx2, dtcdy2, xlo, xhi);

            for (int ix = xlo; ix < xhi; ++ix)
                vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
        }
    }
}

// One predictor/corrector step (half of a primary/staggered pair);
// io selects the primary (0) or staggered (1) indexing offset.
static
void central2d_step(float* restrict u, float* restrict v,
                    float* restrict scratch,
                    float* restrict f,
                    float* restrict g,
                    int io, int nx, int ny, int ng,
                    int nfield, flux_t flux, speed_t speed,
                    float dt, float dx, float dy)
{
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;

    float dtcdx2 = 0.5 * dt / dx;
    float dtcdy2 = 0.5 * dt / dy;

    flux(f, g, u, nx_all * ny_all, nx_all * ny_all);

    central2d_predict(v, scratch, u, f, g, dtcdx2, dtcdy2,
                      nx_all, ny_all, nfield);

    // Flux values of f and g at half step
    for (int iy = 1; iy < ny_all-1; ++iy) {
        int jj = iy*nx_all+1;
        flux(f+jj, g+jj, v+jj, nx_all-2, nx_all * ny_all);
    }

    central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
                      ng-io, nx+ng-io,
                      ng-io, ny+ng-io,
                      nx_all, ny_all, nfield);
}

// Run tbatch step pairs on a padded subdomain without re-synchronizing
// ghost cells; the active region shrinks by ng cells per sub-step, which
// is why the dimension and ghost arguments vary with b below.
static
void central2d_step_batch(float* restrict u, float* restrict v,
                          float* restrict scratch,
                          float* restrict f,
                          float* restrict g,
                          int nx, int ny, int ng,
                          int nfield, flux_t flux, speed_t speed,
                          float dt, float dx, float dy, int tbatch)
{
    for (int b = 0; b < tbatch; ++b) {
        central2d_step(u, v, scratch, f, g,
                       0, nx+2*(ng*tbatch-(2*b+1)*ng/2), ny+2*(ng*tbatch-(2*b+1)*ng/2), (2*b+1)*ng/2,
                       nfield, flux, speed,
                       dt, dx, dy);
        central2d_step(v, u, scratch, f, g,
                       1, nx+2*ng*(tbatch-b-1), ny+2*ng*(tbatch-b-1), ng*(b+1),
                       nfield, flux, speed,
                       dt, dx, dy);
    }
}

/**
 * ### Advance a fixed time
 *
 * The `run` method advances from time 0 (initial conditions) to time
 * `tfinal`.  Note that `run` can be called repeatedly; for example,
 * we might want to advance for a period of time, write out a picture,
 * advance more, and write another picture.  In this sense, `tfinal`
 * should be interpreted as an offset from the time represented by
 * the simulator at the start of the call, rather than as an absolute time.
 *
 * We always take an even number of steps so that the solution
 * at the end lives on the main grid instead of the staggered grid.
 */

// Time-stepping driver: decomposes the grid into partx*party subdomains,
// gives each OpenMP thread its own padded working copy (threadprivate pu),
// and advances in batches of 2*tbatch steps until tfinal is reached.
// Returns the total number of steps taken.
static
int central2d_xrun(float* restrict u, float* restrict v,
                   float* restrict scratch,
                   float* restrict f,
                   float* restrict g,
                   int nx, int ny, int ng,
                   int nfield, flux_t flux, speed_t speed,
                   float tfinal, float dx, float dy, float cfl, int threads)
{
    int nstep = 0;
    int tbatch = 1;
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int c = nx_all * ny_all;
    int N = nfield * c;
    int partx = 2;
    // NOTE(review): float fmaxf on an integer expression -- presumably to
    // clamp the partition count to at least 1; confirm intent.
    int party = fmaxf(1, BLOCK_SIZE*threads/partx);
    omp_set_num_threads(threads);
    // Subdomain dimensions and padded per-thread sizes
    int sx = nx/partx;
    int sy = ny/party;
    int sx_all = sx + 2*tbatch*ng;
    int sy_all = sy + 2*tbatch*ng;
    int pc = sx_all * sy_all;
    int pN = nfield * pc;
    bool done = false;
    float t = 0;
    // Each thread allocates its own u/v/f/g + scratch working block
    static float *pu;
    #pragma omp threadprivate(pu)
    #pragma omp parallel
    {
        pu = (float*) malloc((4*pN + 6*sx_all)* sizeof(float));
    }

    while (!done) {
        float cxy[2] = {1.0e-15f, 1.0e-15f};
        // CFL-limited time step from the current maximum wave speeds
        speed(cxy, u, nx_all * ny_all, nx_all * ny_all);
        float dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
        if (t + 2*tbatch*dt >= tfinal) {
            dt = (tfinal-t)/2/tbatch;
            done = true;
        }
        #pragma omp parallel for collapse(2)
        for (int py = 0; py < party; py++) {
            for (int px = 0; px < partx; px++) {
                int thread = omp_get_thread_num();
                float *pv = pu + pN;
                float *pf = pu+ 2*pN;
                float *pg = pu + 3*pN;
                float *pscratch = pu + 4*pN;
                // Gather subdomain + ghosts, run the step batch, scatter back
                central2d_periodic(pu, u, sx, sy, ng, partx, party, px, py, nfield, tbatch);
                central2d_step_batch(pu, pv, pscratch, pf, pg,
                                     sx, sy, ng, nfield, flux, speed, dt, dx, dy, tbatch);
                copy_subgrid_allfield(u+nx_all*(ng+py*sy)+(ng+px*sx),pu+tbatch*ng*sx_all+ng*tbatch,
                                      sx,sy,c,pc,nx_all,sx_all,nfield);
            }
        }
        t += 2*dt*tbatch;
        nstep += 2*tbatch;
    }
    // Free each thread's private working block
    #pragma omp parallel
    {
        free(pu);
    }
    return nstep;
}

// Public entry point: unpack the simulation struct and run to tfinal.
int central2d_run(central2d_t* sim, float tfinal, int threads)
{
    return central2d_xrun(sim->u, sim->v, sim->scratch,
                          sim->f, sim->g,
                          sim->nx, sim->ny, sim->ng,
                          sim->nfield, sim->flux, sim->speed,
                          tfinal, sim->dx, sim->dy, sim->cfl, threads);
}
BenchUtils.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <chrono>
#include <functional>
#include <vector>

#include <immintrin.h>

#ifdef USE_BLAS
#if __APPLE__
// not sure whether need to differentiate TARGET_OS_MAC or TARGET_OS_IPHONE,
// etc.
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>
#endif
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef USE_MKL
#include <mkl.h>
#endif

#include "./AlignedVec.h"
#include "fbgemm/FbgemmBuild.h"
#include "fbgemm/FbgemmPackMatrixB.h"
#include "src/RefImplementations.h"

namespace fbgemm {

// Fill vec with random values in [low, high] (defined elsewhere).
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);

// Touch a buffer sized to displace the last-level cache (defined elsewhere).
void llc_flush(std::vector<char>& llc);

// Same as omp_get_max_threads() when OpenMP is available, otherwise 1
int fbgemm_get_max_threads();

// Same as omp_get_num_threads() when OpenMP is available, otherwise 1
int fbgemm_get_num_threads();

// Same as omp_get_thread_num() when OpenMP is available, otherwise 0
int fbgemm_get_thread_num();

// Flush every cache line of vec's storage via _mm_clflush, one 64-byte line
// at a time. Each line is read first (into `dummy`) so the access cannot be
// optimized away; the returned dummy value keeps the reads observable.
template <typename T>
NOINLINE float cache_evict(const T& vec) {
  auto const size = vec.size();
  auto const elemSize = sizeof(typename T::value_type);
  auto const dataSize = size * elemSize;

  const char* data = reinterpret_cast<const char*>(vec.data());
  constexpr int CACHE_LINE_SIZE = 64;
  // Not having this dummy computation significantly slows down the computation
  // that follows.
  float dummy = 0.0f;
  for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) {
    dummy += data[i] * 1.0f;
    _mm_mfence();
#ifndef _MSC_VER
    // Compiler barrier: keep the load above from being reordered past the
    // clflush below.
    asm volatile("" ::: "memory");
#endif
    _mm_clflush(&data[i]);
  }

  return dummy;
}

/**
 * Parse application command line arguments
 *
 */
int parseArgumentInt(
    int argc,
    const char* argv[],
    const char* arg,
    int non_exist_val,
    int def_val);

bool parseArgumentBool(
    int argc,
    const char* argv[],
    const char* arg,
    bool def_val);

namespace {
// No-op eviction functor: the default for measureWithWarmup when the caller
// does not want data evicted between iterations.
struct empty_flush {
  void operator()() const {}
};
} // namespace

/**
 * Time `fn` over `measuredIterations` timed runs after `warmupIterations`
 * untimed runs, optionally inside an OpenMP parallel region. Only thread 0
 * performs eviction and accumulates wall time; the result is average
 * seconds per measured iteration.
 *
 * @param Fn functor to execute
 * @param Fe data eviction functor
 */
template <class Fn, class Fe = std::function<void()>>
double measureWithWarmup(
    Fn&& fn,
    int warmupIterations,
    int measuredIterations,
    const Fe& fe = empty_flush(),
    bool useOpenMP = false) {
  for (int i = 0; i < warmupIterations; ++i) {
    // Evict data first
    fe();
    fn();
  }

  double ttot = 0.0;

#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
  {
#endif
    for (int i = 0; i < measuredIterations; ++i) {
      std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
      const auto thread_id = useOpenMP ? fbgemm_get_thread_num() : 0;

      if (thread_id == 0) {
        fe();
      }

      // Barriers bracket the timed region so all threads start and stop
      // together when running in parallel.
#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif

      start = std::chrono::high_resolution_clock::now();

      fn();

#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif

      end = std::chrono::high_resolution_clock::now();
      auto dur =
          std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);

      if (thread_id == 0) {
        // TODO: measure load imbalance
        ttot += dur.count();
      }
    }
#ifdef _OPENMP
  }
#endif

  return ttot / 1e9 / measuredIterations;
}

/*
 * @brief Out-of-place transposition for M*N matrix ref.
 * @param M number of rows in input
 * @param N number of columns in input
 */
template <typename T>
void transpose_matrix(
    int M,
    int N,
    const T* src,
    int ld_src,
    T* dst,
    int ld_dst) {
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < M; ++j) {
      dst[i * ld_dst + j] = src[i + j * ld_src];
    }
  } // for each output row
}

/*
 * @brief In-place transposition for nxk matrix ref.
 * @param n number of rows in input (number of columns in output)
 * @param k number of columns in input (number of rows in output)
 */
template <typename T>
void transpose_matrix(T* ref, int n, int k) {
  // Transpose into a scratch buffer, then copy back over the input.
  std::vector<T> local(n * k);
  transpose_matrix(n, k, ref, k, local.data(), n);
  memcpy(ref, local.data(), n * k * sizeof(T));
}

#ifdef USE_MKL
void test_xerbla(char* srname, const int* info, int);
#endif

// Selects the shape sweep for performance_test:
// 1 = fixed 512x512 sweep over m, 2 = shapes_dataset.h, else random shapes.
#define dataset 1

// Benchmark packed-B GEMM (cblas_gemm_compute) against an FP32 reference
// (MKL / BLAS / reference implementation), checking correctness during
// warm-up. Body continues in the next chunk.
template <typename btype>
void performance_test(
    int num_instances,
    bool flush,
    int repetitions,
    bool is_mkl) {
#ifdef USE_MKL
  mkl_set_xerbla((XerblaEntry)test_xerbla);
#endif
  (void)is_mkl; // Suppress unused variable warning

  float alpha = 1.f, beta = 1.f;
  matrix_op_t btran = matrix_op_t::Transpose;

#if dataset == 1
  const int NITER = (flush) ? 10 : 100;
  std::vector<std::vector<int>> shapes;
  for (auto m = 1; m < 120; m++) {
    // shapes.push_back({m, 128, 512});
    shapes.push_back({m, 512, 512});
  }
#elif dataset == 2
  const int NITER = (flush) ?
10 : 100; #include "shapes_dataset.h" #else flush = false; constexpr int NITER = 1; std::vector<std::vector<int>> shapes; std::random_device r; std::default_random_engine generator(r()); std::uniform_int_distribution<int> dm(1, 100); std::uniform_int_distribution<int> dnk(1, 1024); for (int i = 0; i < 1000; i++) { int m = dm(generator); int n = dnk(generator); int k = dnk(generator); shapes.push_back({m, n, k}); } #endif std::string type; double gflops, gbs, ttot; for (auto s : shapes) { int m = s[0]; int n = s[1]; int k = s[2]; // initialize with small numbers aligned_vector<int> Aint(m * k); randFill(Aint, 0, 4); std::vector<aligned_vector<float>> A; for (int i = 0; i < num_instances; ++i) { A.push_back(aligned_vector<float>(Aint.begin(), Aint.end())); } aligned_vector<int> Bint(k * n); randFill(Bint, 0, 4); aligned_vector<float> B(Bint.begin(), Bint.end()); std::vector<std::unique_ptr<PackedGemmMatrixB<btype>>> Bp; for (int i = 0; i < num_instances; ++i) { Bp.emplace_back(std::unique_ptr<PackedGemmMatrixB<btype>>( new PackedGemmMatrixB<btype>(btran, k, n, alpha, B.data()))); } auto kAligned = ((k * sizeof(float) + 64) & ~63) / sizeof(float); auto nAligned = ((n * sizeof(float) + 64) & ~63) / sizeof(float); std::vector<aligned_vector<float>> Bt(num_instances); auto& Bt_ref = Bt[0]; if (btran == matrix_op_t::Transpose) { Bt_ref.resize(k * nAligned); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[row * nAligned + col] = alpha * B[col * k + row]; } } } else { Bt_ref.resize(kAligned * n); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[col * kAligned + row] = alpha * B[col * k + row]; } } } for (auto i = 1; i < num_instances; ++i) { Bt[i] = Bt_ref; } std::vector<aligned_vector<float>> C_ref; std::vector<aligned_vector<float>> C_fb; if (beta != 0.0f) { aligned_vector<int> Cint(m * n); randFill(Cint, 0, 4); for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(Cint.begin(), 
Cint.end())); C_fb.push_back(aligned_vector<float>(Cint.begin(), Cint.end())); } } else { for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(m * n, 1.f)); C_fb.push_back(aligned_vector<float>(m * n, NAN)); } } double nflops = 2.0 * m * n * k; double nbytes = 4.0 * m * k + sizeof(btype) * 1.0 * k * n + 4.0 * m * n; // warm up MKL and fbgemm // check correctness at the same time for (auto w = 0; w < 3; w++) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, // B is pretransposed, if required by operation m, n, k, 1.0, // Mutliplication by Alpha is done during transpose of B A[0].data(), k, Bt[0].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[0].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[0].data(), k, Bt[0].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[0].data(), n); #endif #ifdef _OPENMP #pragma omp parallel if (num_instances == 1) #endif { int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[0].data(), *Bp[0], beta, C_fb[0].data(), tid, num_threads); } #if defined(USE_MKL) || defined(USE_BLAS) // Compare results for (size_t i = 0; i < C_ref[0].size(); i++) { if (std::abs(C_ref[0][i] - C_fb[0][i]) > 1e-3) { fprintf( stderr, "Error: too high diff between fp32 ref %f and fp16 %f at %ld\n", C_ref[0][i], C_fb[0][i], i); return; } } #endif } #ifdef USE_MKL if (is_mkl) { // Gold via MKL sgemm type = "MKL_FP32"; #elif defined(USE_BLAS) type = "BLAS_FP32"; #else type = "REF_FP32"; #endif ttot = measureWithWarmup( [&]() { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); for (int i = 0; i < repetitions; ++i) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[copy].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[copy].data(), n); #endif } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(Bt[copy]); cache_evict(C_ref[copy]); } }, // Use OpenMP if num instances > 1 num_instances > 1); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "\n%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); #ifdef USE_MKL } #endif type = "FBP_" + std::string(typeid(btype).name()); ttot = measureWithWarmup( [&]() { // When executing in data decomposition (single-instance) mode // Different threads will access different regions of the same // matrices. Thus, copy to be used is always 0. The numbers of // threads would be the as number of threads in the parallel // region. // When running in functional decomposition (multi-instance) mode // different matrices are used. The copy to be used selected by // thread_id (thread_num), and the number of threads performance // the compute of the same instance is 1. int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; for (int i = 0; i < repetitions; ++i) { cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[copy].data(), *Bp[copy], beta, C_fb[copy].data(), tid, num_threads); } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(*Bp[copy]); cache_evict(C_fb[copy]); } }, true /*useOpenMP*/); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); } } aligned_vector<float> getRandomSparseVector( unsigned size, float fractionNonZeros = 1.0); template <typename T> aligned_vector<T> getRandomBlockSparseMatrix( int Rows, int Cols, float fractionNonZerosBlocks = 1.0, int RowBlockSize = 4, int ColBlockSize = 1, T low = 0, T high = 9); } // namespace fbgemm
/* ===== perftest.c ===== */
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "api/libperf.h" #include "lib/libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <sys/poll.h> #include <locale.h> #if defined (HAVE_MPI) # include <mpi.h> #elif defined (HAVE_RTE) # include<rte.h> #endif #define MAX_BATCH_FILES 32 #define MAX_CPUS 1024 #define TL_RESOURCE_NAME_NONE "<none>" #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCIqM:r:T:d:x:A:BUm:" #define TEST_ID_UNDEFINED -1 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; const char *overhead_lat; unsigned window_size; } test_type_t; typedef struct perftest_params { ucx_perf_params_t super; int test_id; } perftest_params_t; struct perftest_context { perftest_params_t params; const char *server_addr; int port; int mpi; unsigned num_cpus; unsigned cpus[MAX_CPUS]; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, 
UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get 
latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void (*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { return sock_io(sock, recv, POLLIN, data, size, 
progress, arg, "recv"); } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final, int is_server, int is_multi_thread) { static const char *fmt_csv; static const char *fmt_numeric; static const char *fmt_plain; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } #if _OPENMP if (!final) { printf("[thread %d]", omp_get_thread_num()); } else if (flags & TEST_FLAG_PRINT_RESULTS) { printf("Final: "); } #endif if (is_multi_thread && final) { fmt_csv = "%4.0f,%.3f,%.2f,%.0f\n"; fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n"; fmt_plain = "%18.0f %29.3f %22.2f %23.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.total_average * 1000000.0, result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.total_average); } else { fmt_csv = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; fmt_numeric = "%'18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n"; fmt_plain = "%18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); } fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *overhead_lat_str; const char *test_data_str; const char *test_api_str; test_type_t *test; unsigned i; test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? 
NULL : &tests[ctx->params.test_id]; if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.super.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]); printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super)); } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", ucs_basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { overhead_lat_str = (test == NULL) ? 
"overhead" : test->overhead_lat; printf("+--------------+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| Stage | # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void print_memory_type_usage(void) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if (ucx_perf_mem_type_allocators[it] != NULL) { printf(" %s - %s\n", ucs_memory_type_names[it], ucs_memory_type_descs[it]); } } } static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); 
printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%"PRIu64")\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%"PRIu64")\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the rest of\n"); printf(" the line is command-line arguments for the test.\n"); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted 
output\n"); printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" iov - scatter-gather list (iovec)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu)\n", ctx->params.super.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf(" -I create context with wakeup feature enabled\n"); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char 
/* continuation of parse_ucp_datatype_params: map an "iov"/"contig" prefix
 * of opt_arg to the corresponding UCP datatype */
                        *iov_type        = "iov";
    const size_t        iov_type_size    = strlen("iov");
    const char          *contig_type     = "contig";
    const size_t        contig_type_size = strlen("contig");

    if (0 == strncmp(opt_arg, iov_type, iov_type_size)) {
        *datatype = UCP_PERF_DATATYPE_IOV;
    } else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) {
        *datatype = UCP_PERF_DATATYPE_CONTIG;
    } else {
        return UCS_ERR_INVALID_PARAM;
    }

    return UCS_OK;
}

/* Translate a memory-type name into its enum value; only types with a
 * registered allocator in ucx_perf_mem_type_allocators are accepted. */
static ucs_status_t parse_mem_type(const char *opt_arg,
                                   ucs_memory_type_t *mem_type)
{
    ucs_memory_type_t it;

    for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) {
        if(!strcmp(opt_arg, ucs_memory_type_names[it]) &&
           (ucx_perf_mem_type_allocators[it] != NULL)) {
            *mem_type = it;
            return UCS_OK;
        }
    }

    ucs_error("Unsupported memory type: \"%s\"", opt_arg);
    return UCS_ERR_INVALID_PARAM;
}

/* Parse "-m <send>[,<recv>]"; a single type is used for both sides.
 * NOTE(review): strtok mutates opt_arg in place and keeps static state —
 * strtok_r would be safer if this could ever run concurrently; confirm
 * single-threaded option parsing before relying on it. */
static ucs_status_t parse_mem_type_params(const char *opt_arg,
                                          ucs_memory_type_t *send_mem_type,
                                          ucs_memory_type_t *recv_mem_type)
{
    const char *delim = ",";
    char *token      = strtok((char*)opt_arg, delim);

    if (UCS_OK != parse_mem_type(token, send_mem_type)) {
        return UCS_ERR_INVALID_PARAM;
    }

    token = strtok(NULL, delim);
    if (NULL == token) {
        *recv_mem_type = *send_mem_type;
        return UCS_OK;
    } else {
        return parse_mem_type(token, recv_mem_type);
    }
}

/* Parse the comma-separated "-s" message-size list into
 * params->msg_size_list (reallocated to the counted token number).
 * Continues in the next chunk. */
static ucs_status_t parse_message_sizes_params(const char *opt_arg,
                                               ucx_perf_params_t *params)
{
    const char delim = ',';
    size_t *msg_size_list, token_num, token_it;
    char *optarg_ptr, *optarg_ptr2;

    optarg_ptr = (char *)opt_arg;
    token_num  = 0;
    /* count the number of given message sizes */
    while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
        ++optarg_ptr;
        ++token_num;
    }
    ++token_num;

    msg_size_list = realloc(params->msg_size_list,
                            sizeof(*params->msg_size_list) * token_num);
    if (NULL == msg_size_list) {
        return UCS_ERR_NO_MEMORY;
    }

    params->msg_size_list = msg_size_list;

    optarg_ptr = (char *)opt_arg;
    errno = 0;
    for (token_it = 0; token_it < token_num; ++token_it) {
        params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
        if (((ERANGE
== errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.am_hdr_size = 8; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t 
*params, char opt, const char *opt_arg) { char *optarg2 = NULL; test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= 
UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'I': params->super.flags |= UCX_PERF_TEST_FLAG_WAKEUP; return UCS_OK; case 'M': if (!strcmp(opt_arg, "single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", 
error_prefix); return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { params->super.max_outstanding = test->window_size; } return UCS_OK; } static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name, int *line_num, perftest_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; char error_prefix[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } ++(*line_num); argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); ucs_snprintf_safe(error_prefix, sizeof(error_prefix), "in batch file '%s' line %d: ", file_name, *line_num); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("%s-%c %s: %s", error_prefix, c, optarg, ucs_status_string(status)); return status; } } status = adjust_test_params(params, error_prefix); if (status != UCS_OK) { return status; } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx) { char *endptr, *cpu_list = opt_arg; int cpu; ctx->num_cpus = 0; cpu = strtol(cpu_list, &endptr, 10); while (((*endptr == ',') || (*endptr == '\0')) && (ctx->num_cpus < MAX_CPUS)) { if (cpu < 0) { ucs_error("invalid cpu number detected: (%d)", cpu); return UCS_ERR_INVALID_PARAM; } ctx->cpus[ctx->num_cpus++] = cpu; if (*endptr == '\0') { break; } cpu_list = endptr + 1; /* skip the comma */ cpu = strtol(cpu_list, &endptr, 10); } if (*endptr == ',') { ucs_error("number of listed cpus exceeds the maximum supported value (%d)", MAX_CPUS); return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_opts(struct 
perftest_context *ctx, int mpi_initialized, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); ucx_perf_global_init(); /* initialize memory types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if (status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 
0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int 
sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { 
ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, 
int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; 
r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; ucs_trace_func(""); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ucs_trace_func(""); ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } 
ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? 
UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->super.api == UCX_PERF_API_UCP) { if (strcmp(parent_params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.dev_name); } if (strcmp(parent_params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? 
"command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). */ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? 
cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
kmp_atomic_float10_max_min.c
// RUN: %libomp-compile -mlong-double-80 && %libomp-run // UNSUPPORTED: gcc // REQUIRES: x86-registered-target #include <stdio.h> #include <omp.h> // Used to detect architecture #include "../../src/kmp_platform.h" #ifdef __cplusplus extern "C" { #endif typedef void* ident_t; extern void __kmpc_atomic_float10_max(ident_t *id_ref, int gtid, long double *lhs, long double rhs); extern void __kmpc_atomic_float10_min(ident_t *id_ref, int gtid, long double *lhs, long double rhs); extern long double __kmpc_atomic_float10_max_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag); extern long double __kmpc_atomic_float10_min_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag); #ifdef __cplusplus } #endif int main() { int ret = 0; #if KMP_ARCH_X86 || KMP_ARCH_X86_64 long double s = 012.3456; // small long double e = 123.4567; // middle long double d = 234.5678; // big long double x = 123.4567; // object long double v = 0.; // captured value // initialize OpenMP runtime library omp_set_num_threads(4); // max // #pragma omp atomic compare update // if (x < d) x = d; __kmpc_atomic_float10_max(NULL, 0, &x, d); if (x != d) { ret++; printf("Error max: %Lf != %Lf\n", x, d); } __kmpc_atomic_float10_max(NULL, 0, &x, s); // no-op if (x != d) { ret++; printf("Error max: %Lf != %Lf\n", x, d); } // min // #pragma omp atomic compare update // if (x > s) x = s; __kmpc_atomic_float10_min(NULL, 0, &x, s); if (x != s) { ret++; printf("Error min: %Lf != %Lf\n", x, s); } __kmpc_atomic_float10_min(NULL, 0, &x, e); // no-op if (x != s) { ret++; printf("Error min: %Lf != %Lf\n", x, s); } // max_cpt old // #pragma omp atomic compare update capture // { v = x; if (x < d) x = d; } v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, d, 0); if (x != d) { ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, d); } if (v != s) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, s); } v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, e, 0); // no-op if (x != d) { 
ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, d); } if (v != d) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, d); } // min_cpt old // #pragma omp atomic compare update capture // { v = x; if (x > d) x = d; } v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, s, 0); if (x != s) { ret++; printf("Error min_cpt obj: %Lf != %Lf\n", x, s); } if (v != d) { ret++; printf("Error min_cpt cpt: %Lf != %Lf\n", v, d); } v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, e, 0); // no-op if (x != s) { ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, s); } if (v != s) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, s); } // max_cpt new // #pragma omp atomic compare update capture // { if (x < d) x = d; v = x; } v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, d, 1); if (x != d) { ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, d); } if (v != d) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, d); } v = __kmpc_atomic_float10_max_cpt(NULL, 0, &x, e, 1); // no-op if (x != d) { ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, d); } if (v != d) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, d); } // min_cpt new // #pragma omp atomic compare update capture // { if (x > d) x = d; v = x; } v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, s, 1); if (x != s) { ret++; printf("Error min_cpt obj: %Lf != %Lf\n", x, s); } if (v != s) { ret++; printf("Error min_cpt cpt: %Lf != %Lf\n", v, s); } v = __kmpc_atomic_float10_min_cpt(NULL, 0, &x, e, 1); // no-op if (x != s) { ret++; printf("Error max_cpt obj: %Lf != %Lf\n", x, s); } if (v != s) { ret++; printf("Error max_cpt cpt: %Lf != %Lf\n", v, s); } if (ret == 0) printf("passed\n"); #else printf("Unsupported architecture, skipping test...\n"); #endif // KMP_ARCH_X86 || KMP_ARCH_X86_64 return ret; }
gemm.c
#include <stdlib.h> #include <math.h> #include "standard.h" void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float *m = calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; //clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,0,m,n,k,1,a,lda,b,ldb,1,c,n); } // //end = clock(); //printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { int i; for (i = 0; i < g_num_threads; i++) { if ((!TA && TB) || (!TA && !TB)) { g_gemm_args_pointer[i].i_start = (i+1) * M / (g_num_threads + 1); g_gemm_args_pointer[i].M = g_gemm_args_pointer[i].i_start + M / (g_num_threads + 1); g_gemm_args_pointer[i].K = K; } if (TA && !TB) { g_gemm_args_pointer[i].i_start = (i+1) * K / (g_num_threads + 1); g_gemm_args_pointer[i].K = g_gemm_args_pointer[i].i_start + K / (g_num_threads + 1); g_gemm_args_pointer[i].M = M; } g_gemm_args_pointer[i].TA = TA; g_gemm_args_pointer[i].TB = TB; g_gemm_args_pointer[i].N = N; g_gemm_args_pointer[i].ALPHA = ALPHA; g_gemm_args_pointer[i].A = A; g_gemm_args_pointer[i].lda = lda; g_gemm_args_pointer[i].B = B; g_gemm_args_pointer[i].BETA = BETA; g_gemm_args_pointer[i].ldb 
= ldb; g_gemm_args_pointer[i].C = C; g_gemm_args_pointer[i].ldc = ldc; sgx_spin_unlock(&g_spin_locks[i]); } // ocall_start_measuring_training(9, 10); if ((!TA && TB) || (!TA && !TB)) { gemm_cpu(TA, TB, 0, M / (g_num_threads + 1), N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc); } if (TA && !TB) { gemm_cpu(TA, TB, 0, M, N, K / (g_num_threads + 1), ALPHA, A, lda, B, ldb, BETA, C, ldc); } // ocall_end_measuring_training(9, 10); // printf("before wait\n"); int waiting = 1; while (waiting) { waiting = 0; for (i = 0; i < g_num_threads; i++) { if (g_finished[i] == 0) waiting = 1; } } // printf("after wait\n"); for (i = 0; i < g_num_threads; i++) sgx_spin_lock(&g_spin_locks[i]); // printf("nach locked\n"); for (i = 0; i < g_num_threads; i++) g_finished[i] = 0; // printf("both locked again\n"); // gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void gemm_nn(int i_start, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; // #pragma omp parallel for for(i = i_start; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_nt(int i_start, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; // #pragma omp parallel for for(i = i_start; i < M; ++i){ // 10 for(j = 0; j < N; ++j){ // 1000 register float sum = 0; for(k = 0; k < K; ++k){ // 784 sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; // if (k > K - 10 && j == 0) // printf("sum=%f, k=%d\n", sum, k); } C[i*ldc+j] += sum; // if (j > N - 10) // printf("C=%f", C[i*ldc+j]); } } } void gemm_tn(int i_start, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; // #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = i_start; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int i_start, int M, int N, 
int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc)
{
    /* Tail of a GEMM kernel whose signature begins above this chunk.
     * Accumulates C[i*ldc+j] += ALPHA * sum_k A[i+k*lda] * B[k+j*ldb]
     * for rows [i_start, M); i_start lets callers split rows across workers. */
    int i, j, k;
    // #pragma omp parallel for
    for (i = i_start; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            register float sum = 0;
            for (k = 0; k < K; ++k) {
                sum += ALPHA * A[i + k*lda] * B[k + j*ldb];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/**
 * CPU dispatch for C = ALPHA*op(A)*op(B) + BETA*C (row-major).
 *
 * TA/TB select whether A/B are treated as transposed; rows [i_start, M)
 * of C are processed, so work can be partitioned by row.  lda/ldb/ldc
 * are the leading dimensions (row strides).  BETA scaling is applied
 * here; the per-variant kernels then accumulate.
 */
void gemm_cpu(int TA, int TB, int i_start, int M, int N, int K, float ALPHA,
              float *A, int lda,
              float *B, int ldb,
              float BETA,
              float *C, int ldc)
{
    ////printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    /* Scale destination first; the kernels below only accumulate. */
    for (i = i_start; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] *= BETA;
        }
    }
    if (!TA && !TB)
        gemm_nn(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (TA && !TB)
        gemm_tn(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (!TA && TB)
        gemm_nt(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else
        gemm_tt(i_start, M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}

#ifdef GPU

#include <math.h>

/**
 * GPU GEMM via cuBLAS.  cuBLAS is column-major, so the row-major product
 * A*B is computed as the column-major product B^T * A^T (operands swapped,
 * each transpose flag applied to the opposite argument).
 */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
              float *A_gpu, int lda,
              float *B_gpu, int ldb,
              float BETA,
              float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    /* NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t;
     * kept as-is because check_error() is declared elsewhere and its
     * parameter type is not visible here -- confirm and align the types. */
    cudaError_t status = cublasSgemm(handle,
                                     (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
                                     (TA ? CUBLAS_OP_T : CUBLAS_OP_N),
                                     N, M, K, &ALPHA,
                                     B_gpu, ldb,
                                     A_gpu, lda,
                                     &BETA,
                                     C_gpu, ldc);
    check_error(status);
}

#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Benchmark helper: run the GPU GEMM 32 times on freshly generated
 * host-side random matrices.  Timing printout is currently disabled. */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m, k);
    else     a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    float *b;
    if (!TB) b = random_matrix(k, n);
    else     b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;

    float *c = random_matrix(m, n);
    int i;
    //clock_t start = clock(), end;
    for (i = 0; i < 32; ++i) {
        gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    // end = clock();
    ////printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Benchmark helper: run the GPU GEMM `iter` times on device buffers and
 * (when the prints are re-enabled) report GFLOPS. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m, k);
    float *b = random_matrix(k, n);

    int lda = (!TA) ? k : m;
    int ldb = (!TB) ? n : k;

    float *c = random_matrix(m, n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    //clock_t start = clock(), end;
    for (i = 0; i < iter; ++i) {
        gemm_gpu(TA, TB, m, n, k, 1, a_cl, lda, b_cl, ldb, 1, c_cl, n);
        cudaThreadSynchronize();
    }
    double flop = ((double)m) * n * (2. * k + 2.) * iter;
    double gflop = flop / pow(10., 9);
    /* BUG FIX: `end = clock();` referenced a variable whose declaration is
     * commented out above -- a compile error in GPU builds; disabled to
     * match the rest of the (commented) timing code. */
    //end = clock();
    //double seconds = sec(end-start);
    //printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    (void)gflop;  /* silence unused warning while timing is disabled */
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Compare CPU and GPU GEMM on identical random inputs and accumulate the
 * sum of squared differences (printout currently disabled). */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if (!TA) a = random_matrix(m, k);
    else     a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    float *b;
    if (!TB) b = random_matrix(k, n);
    else     b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;

    float *c = random_matrix(m, n);
    float *c_gpu = random_matrix(m, n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c_gpu, n);
    ////printf("GPU\n");
    //pm(m, n, c_gpu);
    /* BUG FIX: this call passed 13 arguments to the 14-parameter gemm_cpu
     * (the i_start parameter was missing); 0 processes the full matrix. */
    gemm_cpu(TA, TB, 0, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    ////printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for (i = 0; i < m*n; ++i) {
        ////printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i] - c_gpu[i], 2);
    }
    //printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    (void)sse;  /* silence unused warning while the report is disabled */
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

/* Driver: timing sweep over representative layer shapes (accuracy checks
 * and the older sweep are kept commented out, as in the original). */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);
    return 0;
}
#endif
/* ==== radix_sort.h ==== */
#ifndef _PCL_RADIX_SORT_
#define _PCL_RADIX_SORT_

#include <utility>
#include <vector>
#include <cstdint>   // int64_t (previously relied on transitively)
#include <cstdio>    // printf in the consistency check
#include <cstdlib>   // exit
#include <omp.h>

template<typename T>
using Key_Value_Pair = std::pair<T, T>;

/**
 * Parallel LSD radix sort of key/value pairs by key, 8 bits per pass.
 *
 * inp_buf and tmp_buf are caller-owned buffers of `elements_count` pairs;
 * the sort ping-pongs between them and returns the buffer holding the
 * sorted result (inp_buf for an even number of passes, tmp_buf for odd).
 * `max_value` bounds the keys (assumed non-negative) and determines the
 * number of passes.  Must not be called from inside a parallel region.
 */
template<typename T>
Key_Value_Pair<T>* radix_sort_parallel(Key_Value_Pair<T>* inp_buf,
                                       Key_Value_Pair<T>* tmp_buf,
                                       int64_t elements_count,
                                       int64_t max_value)
{
  int maxthreads = omp_get_max_threads();
  // Shared per-thread histograms.  Heap-allocated: the original used a
  // GCC variable-length-array extension, which is not standard C++.
  std::vector<int> histogram(256 * maxthreads);
  std::vector<int> histogram_ps(256 * maxthreads + 1);
  if (max_value == 0) return inp_buf;
  // BUG FIX: the original used __builtin_clz (32-bit operand) on the
  // 64-bit max_value, truncating it and mis-counting passes for keys
  // >= 2^32.  __builtin_clzll handles the full 64-bit range; max_value
  // is known non-zero here, so the builtin's argument is valid.
  int num_bits = 64 - __builtin_clzll((unsigned long long)max_value);
  int num_passes = (num_bits + 7) / 8;

#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    int* local_histogram = &histogram[256 * tid];
    int* local_histogram_ps = &histogram_ps[256 * tid];
    // BUG FIX: this was `int`, truncating counts beyond 2^31 elements.
    int64_t elements_count_4 = elements_count / 4 * 4;

    Key_Value_Pair<T>* input = inp_buf;
    Key_Value_Pair<T>* output = tmp_buf;

    for (int pass = 0; pass < num_passes; pass++) {
      /* Step 1: histogram of the current byte (4-way unrolled). */
      for (int i = 0; i < 256; i++) local_histogram[i] = 0;

#pragma omp for schedule(static)
      for (int64_t i = 0; i < elements_count_4; i += 4) {
        T val_1 = input[i].first;
        T val_2 = input[i + 1].first;
        T val_3 = input[i + 2].first;
        T val_4 = input[i + 3].first;
        local_histogram[(val_1 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_2 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_3 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_4 >> (pass * 8)) & 0xFF]++;
      }
      if (tid == (nthreads - 1)) {   // last thread takes the remainder
        for (int64_t i = elements_count_4; i < elements_count; i++) {
          T val = input[i].first;
          local_histogram[(val >> (pass * 8)) & 0xFF]++;
        }
      }
#pragma omp barrier

      /* Step 2: exclusive prefix sum over (bin, thread), giving each
       * thread its stable starting offset per bin. */
      if (tid == 0) {
        int sum = 0, prev_sum = 0;
        for (int bins = 0; bins < 256; bins++)
          for (int t = 0; t < nthreads; t++) {
            sum += histogram[t * 256 + bins];
            histogram_ps[t * 256 + bins] = prev_sum;
            prev_sum = sum;
          }
        histogram_ps[256 * nthreads] = prev_sum;
        if (prev_sum != elements_count) {
          printf("Error1!\n");
          exit(123);
        }
      }
#pragma omp barrier

      /* Step 3: stable scatter into the other buffer. */
#pragma omp for schedule(static)
      for (int64_t i = 0; i < elements_count_4; i += 4) {
        T val_1 = input[i].first;
        T val_2 = input[i + 1].first;
        T val_3 = input[i + 2].first;
        T val_4 = input[i + 3].first;
        T bin_1 = (val_1 >> (pass * 8)) & 0xFF;
        T bin_2 = (val_2 >> (pass * 8)) & 0xFF;
        T bin_3 = (val_3 >> (pass * 8)) & 0xFF;
        T bin_4 = (val_4 >> (pass * 8)) & 0xFF;
        int pos;
        pos = local_histogram_ps[bin_1]++;  output[pos] = input[i];
        pos = local_histogram_ps[bin_2]++;  output[pos] = input[i + 1];
        pos = local_histogram_ps[bin_3]++;  output[pos] = input[i + 2];
        pos = local_histogram_ps[bin_4]++;  output[pos] = input[i + 3];
      }
      if (tid == (nthreads - 1)) {
        for (int64_t i = elements_count_4; i < elements_count; i++) {
          T val = input[i].first;
          int pos = local_histogram_ps[(val >> (pass * 8)) & 0xFF]++;
          output[pos] = input[i];
        }
      }

      std::swap(input, output);   // ping-pong the buffers for the next pass
#pragma omp barrier
    }
  }
  return (num_passes % 2 == 0 ? inp_buf : tmp_buf);
}

#endif
/* ==== sgbuf.c ==== */
#include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif #include "sgtype.h" #include "sgbuf.h" #include "mt64.h" #include "vrand.h" void random_data(sgData_t *buf, size_t len){ #ifdef _OPENMP int nt = omp_get_max_threads(); #else int nt = 1; #endif #pragma omp parallel for num_threads(nt) schedule(static, 1) for(int i=0; i<nt; i++) { init_genrand64(0x1337ULL + i); } #pragma omp parallel for shared(buf,len) num_threads(nt) for(size_t i = 0; i < len; i++){ buf[i] = genrand64_int63() % 10; } } void linear_indices(sgIdx_t *idx, size_t len, size_t worksets, size_t stride, int randomize){ sgIdx_t *idx_cur = idx; for(size_t j = 0; j < worksets; j++){ for(size_t i = 0; i < len; i++){ idx_cur[i] = i * stride; } idx_cur = idx_cur + len; } // Fisher-Yates Shuffling if(randomize){ unsigned long long init[4] = {0x12345ULL, 0x23456ULL, 0x34567ULL,0x45678ULL}; int length = 4; init_by_array64(init, length); for(size_t i = 0; i < len-2; i++){ size_t j = (genrand64_int64() % (len-i)) + i; for(size_t k = 0; k < worksets; k++) { size_t tmp = idx[k*len+i]; idx[k*len+i] = idx[k*len+j]; idx[k*len+j] = tmp; } } } } void wrap_indices(sgIdx_t *idx, size_t len, size_t worksets, size_t stride, size_t wrap) { if(wrap > stride || stride == 1){ linear_indices(idx, len, worksets, stride, 0); return; } sgIdx_t *idx_cur = idx; for(size_t j = 0; j < worksets; j++) { for(size_t w = 0; w < wrap; w++){ size_t offset = (stride-(w*(stride/wrap))-1); for(size_t i = 0; i < len/wrap; i++){ idx_cur[i + (len/wrap)*w] = offset + stride*i; } } idx_cur = idx_cur + len; } } //Mostly Stride-1 void ms1_indices(sgIdx_t *idx, size_t len, size_t worksets, size_t run, size_t gap){ sgIdx_t *idx_cur = idx; for(size_t j = 0; j < worksets; j++){ for(size_t i = 0; i < len; i++){ idx_cur[i] = (i / run) * gap + (i % run); } idx_cur = idx_cur + len; } } struct instruction get_random_instr_orig (struct trace tr) { double r = (double)rand() / (double)RAND_MAX; for (int i = 0; i < tr.length-1; i++) { if 
(tr.in[i].cpct > r) { return tr.in[i]; } } return tr.in[tr.length-1]; } struct instruction get_random_instr(struct trace tr) { static int init = 0; static dist_t *tr_dist; if( !init ) { // This style of init will leak the dist_t when done.. vrand_init(0x1337ULL); tr_dist = vrand_dist_alloc(tr.length); double sum_pct = 0.0; for(int i=0; i<tr.length; i++) { tr_dist->p[i] = tr.in[i].pct; sum_pct += tr_dist->p[i]; } vrand_dist_init(tr_dist, sum_pct); init = 1; } return tr.in[ vrand_dist(tr_dist) ]; } //returns the size of the buffer required size_t trace_indices( sgIdx_t *idx, size_t len, struct trace tr) { //for now, assume that all specified numbers are for 8-byte data types // and reads are 8 byte alignd sgsIdx_t *sidx = (sgsIdx_t*)idx; size_t data_type_size = 8; size_t cur = 0; int done = 0; while (cur < len && !done) { struct instruction in = get_random_instr (tr); int i; for (i = 0; i < in.length ; i++) { if (i + cur < len) { #if 0 // Skip first delta (i.e., between two SIMD instructions). if( i == 0 ) { sidx[i+cur] = 8; } else #endif { sidx[i+cur] = in.delta[i]; } } else { done = 1; break; } } cur += i; } assert (cur == len); // Pass over sidx[], convert byte addresses to indicies, track min. sidx[0] /= 8; sgsIdx_t min = sidx[0]; for (size_t i = 1; i < len; i++) { sidx[i] = sidx[i-1] + sidx[i] / 8; if (idx[i] < min) min = sidx[i]; } // Translate to zero-based start index, track max. idx[0] = sidx[0] - min; size_t max = idx[0]; for (size_t i = 1; i < len; i++) { idx[i] = sidx[i] - min; if (idx[i] > max) max = idx[i]; } // Pageinate the positive zero-based indicies in idx[]. long *pages = NULL, npages = 0; long page,pidx; long page_bits = 26; // 26 => 64MiB long new_idx; long new_max = 0; for(size_t i = 0; i < len; i++) { // Turn address into page. page = (idx[i]*8) >> page_bits; // Find existing / make new page entry. 
pidx = -1; for(size_t p = 0; p < npages; p++) { if( pages[p] == page ) { pidx = p; break; } } if( pidx == -1 ) { pidx = npages; npages++; if( !(pages = realloc(pages,npages*sizeof(long))) ) { fprintf(stderr,"trace_indices(): Failed to allocate new page entry (%ld).\n",npages); } pages[pidx] = page; } // Replace sparse page bits in address with dense page index bits. new_idx = (pidx << page_bits) | ((idx[i]*8) & ((1l<<page_bits)-1l)); new_idx /= 8; idx[i] = new_idx; if( idx[i] > new_max ) { new_max = idx[i]; } } max = new_max; if( npages ) free(pages); return max; } void compress_indices( sgIdx_t *idx, size_t len) { // Pageinate the positive zero-based indicies in idx[]. long *pages = NULL, npages = 0; long page,pidx; long page_bits = 12; // 12 => 4KiB long new_idx; for(size_t i = 0; i < len; i++) { // Turn address into page. page = (idx[i]*8) >> page_bits; // Find existing / make new page entry. pidx = -1; for(size_t p = 0; p < npages; p++) { if( pages[p] == page ) { pidx = p; break; } } if( pidx == -1 ) { pidx = npages; npages++; if( !(pages = realloc(pages,npages*sizeof(long))) ) { fprintf(stderr,"trace_indices(): Failed to allocate new page entry (%ld).\n",npages); } pages[pidx] = page; } // Replace sparse page bits in address with dense page index bits. new_idx = (pidx << page_bits) | ((idx[i]*8) & ((1l<<page_bits)-1l)); new_idx /= 8; idx[i] = new_idx; } if( npages ) free(pages); } #ifdef USE_OPENCL cl_mem clCreateBufferSafe(cl_context context, cl_mem_flags flags, size_t size, void *host_ptr){ cl_int err; cl_mem buf = clCreateBuffer(context, flags, size, host_ptr, &err); CHECK_CL_ERROR(err, "clCreateBuffer"); return buf; } #endif
/* ==== sp.c ==== */
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - SP

  This benchmark is an OpenMP C version of the NPB SP code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Author: R. Van der Wijngaart
          W. Saphir

  OpenMP C version: S. Satoh

  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "header.h"

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void ninvr(void);
static void pinvr(void);
static void compute_rhs(void);
static void set_constants(void);
static void txinvr(void);
static void tzetar(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void y_solve(void);
static void z_solve(void);

/*--------------------------------------------------------------------
      program SP
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {

  int niter, step;
  double mflops, tmax;
  int nthreads = 1;
  boolean verified;
  char class;
  FILE *fp;

/*--------------------------------------------------------------------
c      Read input file (if it exists), else take
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
         " - SP Benchmark\n\n");

  fp = fopen("inputsp.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputsp.data\n");
    /* NOTE(review): fscanf/fgetc results are unchecked, as in the NPB
     * sources; a malformed input file leaves niter/dt/grid_points unset. */
    fscanf(fp, "%d", &niter);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%lf", &dt);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d", &grid_points[0], &grid_points[1], &grid_points[2]);
    fclose(fp);
  } else {
    printf(" No input file inputsp.data. Using compiled defaults");
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n", grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  if ((grid_points[0] > IMAX) || (grid_points[1] > JMAX) || (grid_points[2] > KMAX)) {
    printf("%d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();
  initialize();
  lhsinit();
  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();

  timer_clear(1);
  timer_start(1);

  for (step = 1; step <= niter; step++) {
    if (step % 20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }
    adi();
  }

  /* BUG FIX: these braces were a parallel region whose
   * "#pragma omp parallel" had been lost (note the orphaned
   * "end parallel" comment); without it omp_get_num_threads()
   * always reported 1 thread.  All threads store the same value,
   * as in the original NPB sources. */
#pragma omp parallel
  {
#if defined(_OPENMP)
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &class, &verified);

  if (tmax != 0) {
    mflops = (881.174 * pow((double)PROBLEM_SIZE, 3.0)
              - 4683.91 * pow2((double)PROBLEM_SIZE)
              + 11484.5 * (double)PROBLEM_SIZE
              - 19272.4) * (double)niter / (tmax * 1000000.0);
  } else {
    mflops = 0.0;
  }

  c_print_results("SP", class, grid_points[0], grid_points[1], grid_points[2],
                  niter, nthreads, tmax, mflops, " floating point",
                  verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void add(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c addition of update to the vector u
c-------------------------------------------------------------------*/

  int i, j, k, m;

  /* The original put a nested "parallel for" (with firstprivate copies of
   * uninitialized indices) on every loop level; nested regions add pure
   * overhead.  One parallel loop over i is equivalent and race-free:
   * every (m,i,j,k) element is written exactly once. */
  for (m = 0; m < 5; m++) {
#pragma omp parallel for private(j, k)
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k];
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void adi(void) {

/*--------------------------------------------------------------------
c one ADI sweep: build the RHS, then factor/solve in x, y, z and update u
c-------------------------------------------------------------------*/
  compute_rhs();
  txinvr();
  x_solve();
  y_solve();
  z_solve();
  add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c this 
function computes the norm of the difference between the c computed solution and the exact solution c-------------------------------------------------------------------*/ int i, j, k, m, d; double xi, eta, zeta, u_exact[5], add; #pragma omp parallel for firstprivate(rms ,m ) for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; for (k = 0; k <= grid_points[2]-1; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, u_exact); #pragma omp parallel for firstprivate(add ,rms ,m ,k ,j ,i ) for (m = 0; m < 5; m++) { add = u[m][i][j][k] - u_exact[m]; rms[m] = rms[m] + add*add; } } } } #pragma omp parallel for firstprivate(d ,m ,rms ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(m ,rms ) for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double)(grid_points[d]-2); } rms[m] = sqrt(rms[m]); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs_norm(double rms[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ int i, j, k, d, m; double add; #pragma omp parallel for firstprivate(rms ,m ) for (m = 0; m < 5; m++) { rms[m] = 0.0; } #pragma omp parallel for firstprivate(rms ,m ) for (i = 0; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(rms ,m ) for (j = 0; j <= grid_points[1]-2; j++) { #pragma omp parallel for firstprivate(rms ,m ) for (k = 0; k <= grid_points[2]-2; k++) { #pragma omp parallel for firstprivate(m ,j ,k ,add ,rms ,i ) for (m = 0; m < 5; m++) { add = rhs[m][i][j][k]; rms[m] = rms[m] + add*add; } } } } #pragma omp parallel for firstprivate(d ,m ,rms ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(m ,rms ) for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double)(grid_points[d]-2); } rms[m] = 
sqrt(rms[m]); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_rhs(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the right hand side based on exact solution c-------------------------------------------------------------------*/ double dtemp[5], xi, eta, zeta, dtpp; int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1; /*-------------------------------------------------------------------- c initialize c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(k ,j ,i ,m ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(k ,j ,i ,m ) for (i = 0; i <= grid_points[0]-1; i++) { #pragma omp parallel for firstprivate(k ,j ,i ,m ) for (j = 0; j <= grid_points[1]-1; j++) { #pragma omp parallel for firstprivate(k ,j ,i ,m ) for (k= 0; k <= grid_points[2]-1; k++) { forcing[m][i][j][k] = 0.0; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences c-------------------------------------------------------------------*/ for (k = 1; k <= grid_points[2]-2; k++) { zeta = (double)k * dnzm1; for (j = 1; j <= grid_points[1]-2; j++) { eta = (double)j * dnym1; for (i = 0; i <= grid_points[0]-1; i++) { xi = (double)i * dnxm1; exact_solution(xi, eta, zeta, dtemp); #pragma omp parallel for firstprivate(m ,i ,j ,k ) for (m = 0; m < 5; m++) { ue[m][i] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; #pragma omp parallel for firstprivate(dtpp ,m ,i ,j ,k ) for (m = 1; m < 5; m++) { buf[m][i] = dtpp * dtemp[m]; } cuf[i] = buf[1][i] * buf[1][i]; buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i]; q[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i] + buf[3][i]*ue[3][i]); } #pragma omp 
parallel for firstprivate(dx1tx1 ,tx2 ,dx2tx1 ,xxcon1 ,c2 ,dx3tx1 ,xxcon2 ,dx4tx1 ,dx5tx1 ,xxcon5 ,xxcon4 ,xxcon3 ,c1 ,i ,j ,k ) for (i = 1; i <= grid_points[0]-2; i++) { im1 = i-1; ip1 = i+1; forcing[0][i][j][k] = forcing[0][i][j][k] - tx2*( ue[1][ip1]-ue[1][im1] )+ dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))- (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+ xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+ dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+ xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+ dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+ xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+ dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])- buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+ 0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+ buf[0][im1])+ xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+ xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+ dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(dssp ,m ,j ,k ) for (m = 0; m < 5; m++) { i = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]); i = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0*ue[m][i-1] + 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]); } #pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(i ,dssp ,m ,j ,k ) for (i = 3; i <= grid_points[0]-4; i++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp* (ue[m][i-2] - 4.0*ue[m][i-1] 
+ 6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]); } } #pragma omp parallel for firstprivate(dssp ,i ,m ,j ,k ) for (m = 0; m < 5; m++) { i = grid_points[0]-3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i-2] - 4.0*ue[m][i-1] + 6.0*ue[m][i] - 4.0*ue[m][i+1]); i = grid_points[0]-2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences c-------------------------------------------------------------------*/ for (k = 1; k <= grid_points[2]-2; k++) { zeta = (double)k * dnzm1; for (i = 1; i <= grid_points[0]-2; i++) { xi = (double)i * dnxm1; for (j = 0; j <= grid_points[1]-1; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, dtemp); #pragma omp parallel for firstprivate(m ,j ,i ,k ) for (m = 0; m < 5; m++) { ue[m][j] = dtemp[m]; } dtpp = 1.0/dtemp[0]; #pragma omp parallel for firstprivate(dtpp ,m ,j ,i ,k ) for (m = 1; m < 5; m++) { buf[m][j] = dtpp * dtemp[m]; } cuf[j] = buf[2][j] * buf[2][j]; buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] + buf[3][j] * buf[3][j]; q[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] + buf[3][j]*ue[3][j]); } #pragma omp parallel for firstprivate(dy1ty1 ,ty2 ,dy2ty1 ,yycon2 ,dy3ty1 ,yycon1 ,c2 ,dy4ty1 ,dy5ty1 ,yycon5 ,yycon4 ,yycon3 ,c1 ,j ,i ,k ) for (j = 1; j <= grid_points[1]-2; j++) { jm1 = j-1; jp1 = j+1; forcing[0][i][j][k] = forcing[0][i][j][k] - ty2*( ue[2][jp1]-ue[2][jm1] )+ dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]); forcing[1][i][j][k] = forcing[1][i][j][k] - ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+ yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+ dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]); forcing[2][i][j][k] = forcing[2][i][j][k] - ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))- (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+ yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+ dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]); 
forcing[3][i][j][k] = forcing[3][i][j][k] - ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+ yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+ dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]); forcing[4][i][j][k] = forcing[4][i][j][k] - ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])- buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+ 0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+ buf[0][jm1])+ yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+ yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+ dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(dssp ,m ,i ,k ) for (m = 0; m < 5; m++) { j = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]); j = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0*ue[m][j-1] + 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]); } #pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(j ,dssp ,m ,i ,k ) for (j = 3; j <= grid_points[1]-4; j++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp* (ue[m][j-2] - 4.0*ue[m][j-1] + 6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]); } } #pragma omp parallel for firstprivate(dssp ,j ,m ,i ,k ) for (m = 0; m < 5; m++) { j = grid_points[1]-3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j-2] - 4.0*ue[m][j-1] + 6.0*ue[m][j] - 4.0*ue[m][j+1]); j = grid_points[1]-2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences c-------------------------------------------------------------------*/ for (j = 1; j <= grid_points[1]-2; j++) { eta = (double)j * dnym1; for (i = 1; i <= grid_points[0]-2; i++) { xi = (double)i * dnxm1; for (k = 0; k <= grid_points[2]-1; k++) { zeta = 
(double)k * dnzm1; exact_solution(xi, eta, zeta, dtemp); #pragma omp parallel for firstprivate(m ,k ,i ,j ) for (m = 0; m < 5; m++) { ue[m][k] = dtemp[m]; } dtpp = 1.0/dtemp[0]; #pragma omp parallel for firstprivate(dtpp ,m ,k ,i ,j ) for (m = 1; m < 5; m++) { buf[m][k] = dtpp * dtemp[m]; } cuf[k] = buf[3][k] * buf[3][k]; buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] + buf[2][k] * buf[2][k]; q[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] + buf[3][k]*ue[3][k]); } #pragma omp parallel for firstprivate(dz1tz1 ,tz2 ,dz2tz1 ,zzcon2 ,dz3tz1 ,dz4tz1 ,zzcon1 ,c2 ,dz5tz1 ,zzcon5 ,zzcon4 ,zzcon3 ,c1 ,k ,i ,j ) for (k = 1; k <= grid_points[2]-2; k++) { km1 = k-1; kp1 = k+1; forcing[0][i][j][k] = forcing[0][i][j][k] - tz2*( ue[3][kp1]-ue[3][km1] )+ dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+ zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+ dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+ zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+ dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))- (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+ zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+ dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])- buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+ 0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k] +buf[0][km1])+ zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+ zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+ dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(dssp ,m ,i ,j ) for (m = 0; m < 5; m++) { k = 
1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]); k = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0*ue[m][k-1] + 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]); } #pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(k ,dssp ,m ,i ,j ) for (k = 3; k <= grid_points[2]-4; k++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp* (ue[m][k-2] - 4.0*ue[m][k-1] + 6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]); } } #pragma omp parallel for firstprivate(dssp ,k ,m ,i ,j ) for (m = 0; m < 5; m++) { k = grid_points[2]-3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k-2] - 4.0*ue[m][k-1] + 6.0*ue[m][k] - 4.0*ue[m][k+1]); k = grid_points[2]-2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]); } } } /*-------------------------------------------------------------------- c now change the sign of the forcing function, c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(i ,j ,k ,m ) for (m = 0; m < 5; m++) { #pragma omp parallel for firstprivate(i ,j ,k ,m ) for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k ,m ) for (j = 1; j <= grid_points[1]-2; j++) { #pragma omp parallel for firstprivate(i ,j ,k ,m ) for (k = 1; k <= grid_points[2]-2; k++) { forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k]; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function returns the exact solution at point xi, eta, zeta 
c-------------------------------------------------------------------*/
  int m;

/* Tail of exact_solution(): evaluate, for each of the 5 flow variables,
   the tri-cubic polynomial in xi/eta/zeta using coefficient table ce.
   NOTE(review): listing the loop index m in firstprivate() of its own
   parallel loop is non-conforming OpenMP (iteration variables are
   predetermined private) -- flagged, left unchanged. */
#pragma omp parallel for firstprivate(zeta ,eta ,xi ,dtemp ,m )
  for (m = 0; m < 5; m++) {
    dtemp[m] = ce[0][m] +
      xi*(ce[1][m] + xi*(ce[4][m] + xi*(ce[7][m] + xi*ce[10][m]))) +
      eta*(ce[2][m] + eta*(ce[5][m] + eta*(ce[8][m] + eta*ce[11][m])))+
      zeta*(ce[3][m] + zeta*(ce[6][m] + zeta*(ce[9][m] +
        zeta*ce[12][m])));
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void initialize(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
/* NOTE(review): the nested "parallel for" pragmas below create nested
   parallel regions (usually inert when nesting is disabled), and the
   firstprivate lists name the loops' own iteration variables, which is
   non-conforming OpenMP. Pragmas preserved verbatim for review. */
#pragma omp parallel for firstprivate(j ,k ,i )
  for (i = 0; i <= IMAX-1; i++) {
#pragma omp parallel for firstprivate(j ,k ,i )
    for (j = 0; j <= IMAX-1; j++) {
#pragma omp parallel for firstprivate(j ,k ,i )
      for (k = 0; k <= IMAX-1; k++) {
        u[0][i][j][k] = 1.0;
        u[1][i][j][k] = 0.0;
        u[2][i][j][k] = 0.0;
        u[3][i][j][k] = 0.0;
        u[4][i][j][k] = 1.0;
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/

  for (i = 0; i <= grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        /* exact solution on the two opposing faces of each axis */
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta, &Pface[ix][0][0]);
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
        }
        /* tri-linear transfinite interpolation of the 6 face values */
#pragma omp parallel for firstprivate(Pxi ,Peta ,Pzeta ,xi ,eta ,zeta ,m ,k ,j ,i )
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];
          u[m][i][j][k] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/

  xi = 0.0;
  i = 0;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
#pragma omp parallel for firstprivate(m ,k ,j )
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/

  xi = 1.0;
  i = grid_points[0]-1;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
#pragma omp parallel for firstprivate(i ,m ,k ,j )
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/

  eta = 0.0;
  j = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
#pragma omp parallel for firstprivate(m ,k ,i )
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/

  eta = 1.0;
  j = grid_points[1]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
#pragma omp parallel for firstprivate(j ,m ,k ,i )
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/

  zeta = 0.0;
  k = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
#pragma omp parallel for firstprivate(m ,j ,i )
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/

  zeta = 1.0;
  k = grid_points[2]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      /* note: this m-loop carries no pragma in the original */
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsinit(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

  int i, j, k, n;

/*--------------------------------------------------------------------
c     zap the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(k ,j ,i ,n )
  for (n = 0; n < 15; n++) {
#pragma omp parallel for firstprivate(k ,j ,i ,n )
    for (i = 0; i < grid_points[0]; i++) {
#pragma omp parallel for firstprivate(k ,j ,i ,n )
      for (j = 0; j < grid_points[1]; j++) {
#pragma omp parallel for firstprivate(k ,j ,i ,n )
        for (k = 0; k < grid_points[2]; k++) {
          lhs[n][i][j][k] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c      next, set all diagonal values to 1.
This is overkill, but
c      convenient
c-------------------------------------------------------------------*/
/* Set the center (diagonal) entry of each of the three penta-diagonal
   factors (offsets 2, 7, 12 = 5*n+2) to 1.0 over the whole grid.
   NOTE(review): nested "parallel for" pragmas and firstprivate of the
   loops' own indices are non-conforming OpenMP; preserved verbatim. */
#pragma omp parallel for firstprivate(i ,j ,k ,n )
  for (n = 0; n < 3; n++) {
#pragma omp parallel for firstprivate(i ,j ,k ,n )
    for (i = 0; i < grid_points[0]; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,n )
      for (j = 0; j < grid_points[1]; j++) {
#pragma omp parallel for firstprivate(i ,j ,k ,n )
        for (k = 0; k < grid_points[2]; k++) {
          lhs[5*n+2][i][j][k] = 1.0;
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsx(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three x-factors
c-------------------------------------------------------------------*/

  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c      first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
/* NOTE(review): the orphaned "#pragma omp for" directives here are
   presumably meant to bind to a parallel region in the caller --
   confirm against the calling code (outside this view). */
  for (j = 1; j <= grid_points[1]-2; j++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
#pragma omp for
      for (i = 0; i <= grid_points[0]-1; i++) {
        ru1 = c3c4*rho_i[i][j][k];
        cv[i] = us[i][j][k];
        rhon[i] = max(dx2+con43*ru1,
                      max(dx5+c1c5*ru1,
                          max(dxmax+ru1, dx1)));
      }
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
        lhs[0][i][j][k] = 0.0;
        lhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1];
        lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];
        lhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];
        lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c      add fourth order dissipation
c-------------------------------------------------------------------*/

  i = 1;
#pragma omp for
  for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,j )
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* one-sided stencil at the low-i boundary (rows i=1 and i=2) */
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
      lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;
      lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;
      lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;
    }
  }

#pragma omp for
  for (i = 3; i <= grid_points[0]-4; i++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* interior: full symmetric 4th-order dissipation stencil */
        lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
        lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
        lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
        lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
        lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }

  i = grid_points[0]-3;
#pragma omp for
  for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(k ,comz1 ,i ,comz4 ,comz6 ,comz5 ,j )
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* one-sided stencil at the high-i boundary */
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;
      lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
      lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;
    }
  }

/*--------------------------------------------------------------------
c      subsequently, fill the other factors (u+c), (u-c) by adding to
c      the first
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,dttx2 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,dttx2 ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        lhs[0+5][i][j][k]  = lhs[0][i][j][k];
        lhs[1+5][i][j][k]  = lhs[1][i][j][k] - dttx2 * speed[i-1][j][k];
        lhs[2+5][i][j][k]  = lhs[2][i][j][k];
        lhs[3+5][i][j][k]  = lhs[3][i][j][k] + dttx2 * speed[i+1][j][k];
        lhs[4+5][i][j][k]  = lhs[4][i][j][k];
        lhs[0+10][i][j][k] = lhs[0][i][j][k];
        lhs[1+10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i-1][j][k];
        lhs[2+10][i][j][k] = lhs[2][i][j][k];
        lhs[3+10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i+1][j][k];
        lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsy(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/

  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c      first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/

  for (i = 1; i <= grid_points[0]-2; i++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
#pragma omp for
      for (j = 0; j <= grid_points[1]-1; j++) {
        ru1 = c3c4*rho_i[i][j][k];
        cv[j] = vs[i][j][k];
        rhoq[j] = max(dy3 + con43 * ru1,
                      max(dy5 + c1c5*ru1,
                          max(dymax + ru1, dy1)));
      }
#pragma omp for
      for (j = 1; j <= grid_points[1]-2; j++) {
        lhs[0][i][j][k] = 0.0;
        lhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
        lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];
        lhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
        lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c      add fourth order dissipation
c-------------------------------------------------------------------*/

  j = 1;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(k ,comz5 ,comz4 ,comz1 ,comz6 ,i )
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* one-sided stencil at the low-j boundary */
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
      lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;
      lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;
      lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;
    }
  }

#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
    for (j = 3; j <= grid_points[1]-4; j++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* interior: full symmetric stencil */
        lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
        lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
        lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
        lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
        lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }

  j = grid_points[1]-3;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(k ,comz1 ,j ,comz4 ,comz6 ,comz5 ,i )
    for (k = 1; k <= grid_points[2]-2; k++) {
      /* one-sided stencil at the high-j boundary */
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;
      lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
      lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;
    }
  }

/*--------------------------------------------------------------------
c      subsequently, do the other two factors
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,dtty2 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,dtty2 ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        lhs[0+5][i][j][k] = lhs[0][i][j][k];
        lhs[1+5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j-1][k];
        lhs[2+5][i][j][k] = lhs[2][i][j][k];
        lhs[3+5][i][j][k] = lhs[3][i][j][k] + dtty2 * speed[i][j+1][k];
        lhs[4+5][i][j][k] = lhs[4][i][j][k];
        lhs[0+10][i][j][k] = lhs[0][i][j][k];
        lhs[1+10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j-1][k];
        lhs[2+10][i][j][k] = lhs[2][i][j][k];
        lhs[3+10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j+1][k];
        lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsz(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/

  double ru1;
  int i, j, k;

/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
/* NOTE(review): same pragma pattern as lhsx/lhsy -- orphaned "omp for"
   plus nested "parallel for" with firstprivate of loop indices;
   preserved verbatim for review. */
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp for
      for (k = 0; k <= grid_points[2]-1; k++) {
        ru1 = c3c4*rho_i[i][j][k];
        cv[k] = ws[i][j][k];
        rhos[k] = max(dz4 + con43 * ru1,
                      max(dz5 + c1c5 * ru1,
                          max(dzmax + ru1, dz1)));
      }
#pragma omp for
      for (k = 1; k <= grid_points[2]-2; k++) {
        lhs[0][i][j][k] = 0.0;
        lhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
        lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k];
        lhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
        lhs[4][i][j][k] = 0.0;
      }
    }
  }

/*--------------------------------------------------------------------
c      add fourth order dissipation
c-------------------------------------------------------------------*/

  k = 1;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,comz5 ,comz4 ,comz1 ,comz6 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
      /* one-sided stencil at the low-k boundary */
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
      lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;
      lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;
      lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;
    }
  }

#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,comz1 ,comz4 ,comz6 ,i )
      for (k = 3; k <= grid_points[2]-4; k++) {
        /* interior: full symmetric stencil */
        lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
        lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
        lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
        lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
        lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
      }
    }
  }

  k = grid_points[2]-3;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,comz1 ,k ,comz4 ,comz6 ,comz5 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
      /* one-sided stencil at the high-k boundary */
      lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
      lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
      lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
      lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
      lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;
      lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
      lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;
    }
  }

/*--------------------------------------------------------------------
c      subsequently, fill the other factors (u+c), (u-c)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,dttz2 ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,dttz2 ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        lhs[0+5][i][j][k] = lhs[0][i][j][k];
        lhs[1+5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k-1];
        lhs[2+5][i][j][k] = lhs[2][i][j][k];
        lhs[3+5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k+1];
        lhs[4+5][i][j][k] = lhs[4][i][j][k];
        lhs[0+10][i][j][k] = lhs[0][i][j][k];
        lhs[1+10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k-1];
        lhs[2+10][i][j][k] = lhs[2][i][j][k];
        lhs[3+10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k+1];
        lhs[4+10][i][j][k] = lhs[4][i][j][k];
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void ninvr(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   block-diagonal matrix-vector multiplication
c-------------------------------------------------------------------*/

  int i, j, k;
  double r1, r2, r3, r4, r5, t1, t2;

#pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* apply the inverse of the N eigenvector matrix pointwise */
        r1 = rhs[0][i][j][k];
        r2 = rhs[1][i][j][k];
        r3 = rhs[2][i][j][k];
        r4 = rhs[3][i][j][k];
        r5 = rhs[4][i][j][k];
        t1 = bt * r3;
        t2 = 0.5 * ( r4 + r5 );
        rhs[0][i][j][k] = -r2;
        rhs[1][i][j][k] = r1;
        rhs[2][i][j][k] = bt * ( r4 - r5 );
        rhs[3][i][j][k] = -t1 + t2;
        rhs[4][i][j][k] = t1 + t2;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void pinvr(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   block-diagonal matrix-vector multiplication
c-------------------------------------------------------------------*/

  int i, j, k;
  double r1, r2, r3, r4, r5, t1, t2;

#pragma omp parallel for private(i ,j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 )
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,r1 ,r2 ,r3 ,r4 ,r5 ,t1 ,t2 ,bt ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* apply the inverse of the P eigenvector matrix pointwise */
        r1 = rhs[0][i][j][k];
        r2 = rhs[1][i][j][k];
        r3 = rhs[2][i][j][k];
        r4 = rhs[3][i][j][k];
        r5 = rhs[4][i][j][k];
        t1 = bt * r1;
        t2 = 0.5 * ( r4 + r5 );
        rhs[0][i][j][k] = bt * ( r4 - r5 );
        rhs[1][i][j][k] = -r3;
        rhs[2][i][j][k] = r2;
        rhs[3][i][j][k] = -t1 + t2;
        rhs[4][i][j][k] = t1 + t2;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void compute_rhs(void) {
  {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

    int i, j, k, m;
    double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;

/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
/* NOTE(review): throughout compute_rhs the nested "parallel for"
   pragmas and firstprivate lists naming the loops' own iteration
   variables are non-conforming OpenMP (auto-parallelizer output);
   all pragmas preserved verbatim for review. */
#pragma omp parallel for
    for (i = 0; i <= grid_points[0]-1; i++) {
#pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i )
      for (j = 0; j <= grid_points[1]-1; j++) {
#pragma omp parallel for firstprivate(j ,k ,rho_inv ,aux ,c1c2 ,i )
        for (k = 0; k <= grid_points[2]-1; k++) {
          rho_inv = 1.0/u[0][i][j][k];
          rho_i[i][j][k] = rho_inv;
          us[i][j][k] = u[1][i][j][k] * rho_inv;
          vs[i][j][k] = u[2][i][j][k] * rho_inv;
          ws[i][j][k] = u[3][i][j][k] * rho_inv;
          square[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] +
                                  u[2][i][j][k]*u[2][i][j][k] +
                                  u[3][i][j][k]*u[3][i][j][k] ) * rho_inv;
          qs[i][j][k] = square[i][j][k] * rho_inv;
/*--------------------------------------------------------------------
c (do not need speed and ainx until the lhs computation)
c-------------------------------------------------------------------*/
          aux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);
          aux = sqrt(aux);
          speed[i][j][k] = aux;
          ainv[i][j][k] = 1.0/aux;
        }
      }
    }

/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side;  because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/

    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (j = 0; j <= grid_points[1]-1; j++) {
#pragma omp parallel for firstprivate(k ,j ,i ,m )
          for (k = 0; k <= grid_points[2]-1; k++) {
            rhs[m][i][j][k] = forcing[m][i][j][k];
          }
        }
      }
    }

/*--------------------------------------------------------------------
c      compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i )
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,uijk ,up1 ,um1 ,tx2 ,dx1tx1 ,c2 ,dx2tx1 ,con43 ,xxcon2 ,dx3tx1 ,dx4tx1 ,c1 ,xxcon5 ,xxcon3 ,dx5tx1 ,xxcon4 ,i )
        for (k = 1; k <= grid_points[2]-2; k++) {
          uijk = us[i][j][k];
          up1  = us[i+1][j][k];
          um1  = us[i-1][j][k];

          rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 *
            (u[0][i+1][j][k] - 2.0*u[0][i][j][k] + u[0][i-1][j][k]) -
            tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);

          rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 *
            (u[1][i+1][j][k] - 2.0*u[1][i][j][k] + u[1][i-1][j][k]) +
            xxcon2*con43 * (up1 - 2.0*uijk + um1) -
            tx2 * (u[1][i+1][j][k]*up1 - u[1][i-1][j][k]*um1 +
                   (u[4][i+1][j][k]- square[i+1][j][k]-
                    u[4][i-1][j][k]+ square[i-1][j][k])* c2);

          rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 *
            (u[2][i+1][j][k] - 2.0*u[2][i][j][k] + u[2][i-1][j][k]) +
            xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) -
            tx2 * (u[2][i+1][j][k]*up1 - u[2][i-1][j][k]*um1);

          rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 *
            (u[3][i+1][j][k] - 2.0*u[3][i][j][k] + u[3][i-1][j][k]) +
            xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) -
            tx2 * (u[3][i+1][j][k]*up1 - u[3][i-1][j][k]*um1);

          rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 *
            (u[4][i+1][j][k] - 2.0*u[4][i][j][k] + u[4][i-1][j][k]) +
            xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) +
            xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) +
            xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] -
                      2.0*u[4][i][j][k]*rho_i[i][j][k] +
                      u[4][i-1][j][k]*rho_i[i-1][j][k]) -
            tx2 * ( (c1*u[4][i+1][j][k] - c2*square[i+1][j][k])*up1 -
                    (c1*u[4][i-1][j][k] - c2*square[i-1][j][k])*um1 );
        }
      }
    }

/*--------------------------------------------------------------------
c      add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/

    i = 1;
#pragma omp parallel for firstprivate(j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
            ( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);
        }
      }
    }

    i = 2;
#pragma omp parallel for firstprivate(j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            (-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -
             4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);
        }
      }
    }

#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
          for (k = 1; k <= grid_points[2]-2; k++) {
            rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
              ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
                6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
                u[m][i+2][j][k] );
          }
        }
      }
    }

    i = grid_points[0]-3;
#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
              6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );
        }
      }
    }

    i = grid_points[0]-2;
#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,i ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
              5.0*u[m][i][j][k] );
        }
      }
    }

/*--------------------------------------------------------------------
c      compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i )
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,vijk ,vp1 ,vm1 ,ty2 ,dy1ty1 ,yycon2 ,dy2ty1 ,c2 ,dy3ty1 ,con43 ,dy4ty1 ,c1 ,yycon5 ,yycon3 ,dy5ty1 ,yycon4 ,i )
        for (k = 1; k <= grid_points[2]-2; k++) {
          vijk = vs[i][j][k];
          vp1  = vs[i][j+1][k];
          vm1  = vs[i][j-1][k];

          rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 *
            (u[0][i][j+1][k] - 2.0*u[0][i][j][k] + u[0][i][j-1][k]) -
            ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);

          rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 *
            (u[1][i][j+1][k] - 2.0*u[1][i][j][k] + u[1][i][j-1][k]) +
            yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) -
            ty2 * (u[1][i][j+1][k]*vp1 - u[1][i][j-1][k]*vm1);

          rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 *
            (u[2][i][j+1][k] - 2.0*u[2][i][j][k] + u[2][i][j-1][k]) +
            yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
            ty2 * (u[2][i][j+1][k]*vp1 - u[2][i][j-1][k]*vm1 +
                   (u[4][i][j+1][k] - square[i][j+1][k] -
                    u[4][i][j-1][k] + square[i][j-1][k]) *c2);

          rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 *
            (u[3][i][j+1][k] - 2.0*u[3][i][j][k] + u[3][i][j-1][k]) +
            yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) -
            ty2 * (u[3][i][j+1][k]*vp1 - u[3][i][j-1][k]*vm1);

          rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 *
            (u[4][i][j+1][k] - 2.0*u[4][i][j][k] + u[4][i][j-1][k]) +
            yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) +
            yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) +
            yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] -
                      2.0*u[4][i][j][k]*rho_i[i][j][k] +
                      u[4][i][j-1][k]*rho_i[i][j-1][k]) -
            ty2 * ((c1*u[4][i][j+1][k] - c2*square[i][j+1][k]) * vp1 -
                   (c1*u[4][i][j-1][k] - c2*square[i][j-1][k]) * vm1);
        }
      }
    }

/*--------------------------------------------------------------------
c      add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/

    j = 1;
#pragma omp parallel for firstprivate(i ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
            ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);
        }
      }
    }

    j = 2;
#pragma omp parallel for firstprivate(i ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            (-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -
             4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);
        }
      }
    }

#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
        for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
          for (k = 1; k <= grid_points[2]-2; k++) {
            rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
              ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
                6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
                u[m][i][j+2][k] );
          }
        }
      }
    }

    j = grid_points[1]-3;
#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
              6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );
        }
      }
    }

    j = grid_points[1]-2;
#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,j ,dssp ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
              5.0*u[m][i][j][k] );
        }
      }
    }

/*--------------------------------------------------------------------
c      compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i )
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,wijk ,wp1 ,wm1 ,tz2 ,dz1tz1 ,zzcon2 ,dz2tz1 ,dz3tz1 ,c2 ,dz4tz1 ,con43 ,c1 ,zzcon5 ,zzcon3 ,dz5tz1 ,zzcon4 ,i )
        for (k = 1; k <= grid_points[2]-2; k++) {
          wijk = ws[i][j][k];
          wp1  = ws[i][j][k+1];
          wm1  = ws[i][j][k-1];

          rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 *
            (u[0][i][j][k+1] - 2.0*u[0][i][j][k] + u[0][i][j][k-1]) -
            tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);

          rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 *
            (u[1][i][j][k+1] - 2.0*u[1][i][j][k] + u[1][i][j][k-1]) +
            zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) -
            tz2 * (u[1][i][j][k+1]*wp1 - u[1][i][j][k-1]*wm1);

          rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 *
            (u[2][i][j][k+1] - 2.0*u[2][i][j][k] + u[2][i][j][k-1]) +
            zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) -
            tz2 * (u[2][i][j][k+1]*wp1 - u[2][i][j][k-1]*wm1);

          rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 *
            (u[3][i][j][k+1] - 2.0*u[3][i][j][k] + u[3][i][j][k-1]) +
            zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
            tz2 * (u[3][i][j][k+1]*wp1 - u[3][i][j][k-1]*wm1 +
                   (u[4][i][j][k+1] - square[i][j][k+1] -
                    u[4][i][j][k-1] + square[i][j][k-1]) *c2);

          rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 *
            (u[4][i][j][k+1] - 2.0*u[4][i][j][k] + u[4][i][j][k-1]) +
            zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) +
            zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) +
            zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] -
                      2.0*u[4][i][j][k]*rho_i[i][j][k] +
                      u[4][i][j][k-1]*rho_i[i][j][k-1]) -
            tz2 * ( (c1*u[4][i][j][k+1] - c2*square[i][j][k+1])*wp1 -
                    (c1*u[4][i][j][k-1] - c2*square[i][j][k-1])*wm1);
        }
      }
    }

/*--------------------------------------------------------------------
c      add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/

    k = 1;
#pragma omp parallel for firstprivate(i ,j ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
            ( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
        }
      }
    }

    k = 2;
#pragma omp parallel for firstprivate(i ,j ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            (-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -
             4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
        }
      }
    }

#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
          for (k = 3*1; k <= grid_points[2]-3*1-1; k++) {
            rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
              ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
                6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
                u[m][i][j][k+2] );
          }
        }
      }
    }

    k = grid_points[2]-3;
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
              6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );
        }
      }
    }

    k = grid_points[2]-2;
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dssp ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
            ( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
              5.0*u[m][i][j][k] );
        }
      }
    }

    /* finally scale the whole interior right hand side by dt */
#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m )
    for (m = 0; m < 5; m++) {
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m )
        for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(i ,j ,k ,dt ,m )
          for (k = 1; k <= grid_points[2]-2; k++) {
            rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
          }
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void set_constants(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

  /* coefficient table for the exact-solution polynomial; row = power
     term, column = flow variable (continues past this view) */
  ce[0][0]  = 2.0;
  ce[1][0]  = 0.0;
  ce[2][0]  = 0.0;
  ce[3][0]  = 4.0;
  ce[4][0]  = 5.0;
  ce[5][0]  = 3.0;
  ce[6][0]  = 0.5;
  ce[7][0]  = 0.02;
  ce[8][0]  = 0.01;
  ce[9][0]  = 0.03;
  ce[10][0] = 0.5;
  ce[11][0] = 0.4;
  ce[12][0] = 0.3;

  ce[0][1]  = 1.0;
  ce[1][1]  = 0.0;
  ce[2][1]  = 0.0;
  ce[3][1]  = 0.0;
  ce[4][1]  = 1.0;
  ce[5][1]  = 2.0;
  ce[6][1]  = 3.0;
  ce[7][1]  = 0.01;
  ce[8][1]  = 0.03;
  ce[9][1]  = 0.02;
  ce[10][1] = 0.4;
  ce[11][1] = 0.3;
  ce[12][1] = 0.5;

  ce[0][2]  = 2.0;
  ce[1][2]  = 2.0;
  ce[2][2]  = 0.0;
  ce[3][2]  = 0.0;
  ce[4][2]  = 0.0;
  ce[5][2]  = 2.0;
  ce[6][2]  = 3.0;
  ce[7][2]  = 0.04;
  ce[8][2]  = 0.03;
  ce[9][2]  = 0.05;
  ce[10][2] = 0.3;
  ce[11][2] = 0.5;
  ce[12][2] = 0.4;

  ce[0][3]  = 2.0;
  ce[1][3]  = 2.0;
  ce[2][3]  = 0.0;
  ce[3][3]  = 0.0;
  ce[4][3]  = 0.0;
  ce[5][3]  = 2.0;
  ce[6][3]  = 3.0;
  ce[7][3]  = 0.03;
  ce[8][3]  = 0.05;
  ce[9][3]  = 0.04;
  ce[10][3] = 0.2;
  ce[11][3] = 0.1;
  ce[12][3] = 0.3;

  ce[0][4]  = 5.0;
  ce[1][4]  = 4.0;
  ce[2][4]  = 3.0;
  ce[3][4]  = 2.0;
  ce[4][4]  = 0.1;
  ce[5][4]  = 0.4;
  ce[6][4]  = 0.3;
  ce[7][4]  = 0.05;
  ce[8][4]  = 0.04;
ce[9][4] = 0.03; ce[10][4] = 0.1; ce[11][4] = 0.3; ce[12][4] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; bt = sqrt(0.5); dnxm1 = 1.0 / (double)(grid_points[0]-1); dnym1 = 1.0 / (double)(grid_points[1]-1); dnzm1 = 1.0 / (double)(grid_points[2]-1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0-c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; dxmax = max(dx3, dx4); dymax = max(dy2, dy4); dzmax = max(dz2, dz3); dssp = 0.25 * max(dx1, max(dy1, dz1) ); c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt*tx1; dttx2 = dt*tx2; dtty1 = dt*ty1; dtty2 = dt*ty2; dttz1 = dt*tz1; dttz2 = dt*tz2; c2dttx1 = 2.0*dttx1; c2dtty1 = 2.0*dtty1; c2dttz1 = 2.0*dttz1; dtdssp = dt*dssp; comz1 = dtdssp; comz4 = 4.0*dtdssp; comz5 = 5.0*dtdssp; comz6 = 6.0*dtdssp; c3c4tx3 = c3c4*tx3; c3c4ty3 = c3c4*ty3; c3c4tz3 = c3c4*tz3; dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1; dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1; dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1; dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1; dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1; dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1; c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0; xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3; xxcon4 = c3c4tx3*con16*tx3; xxcon5 = c3c4tx3*c1c5*tx3; yycon1 = c3c4ty3*con43*ty3; yycon2 = c3c4ty3*ty3; yycon3 = c3c4ty3*conz1*ty3; yycon4 = c3c4ty3*con16*ty3; yycon5 = c3c4ty3*c1c5*ty3; zzcon1 = c3c4tz3*con43*tz3; zzcon2 = c3c4tz3*tz3; zzcon3 = c3c4tz3*conz1*tz3; zzcon4 = c3c4tz3*con16*tz3; zzcon5 = c3c4tz3*c1c5*tz3; } 
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void txinvr(void) {
/*--------------------------------------------------------------------
c txinvr: block-diagonal matrix-vector multiplication — multiplies
c the RHS at every interior point by the inverse transformation,
c producing the characteristic variables used by the sweep solvers.
c NOTE(review): the outer '#pragma omp for' is an orphaned directive —
c presumably called from inside a parallel region; confirm at call site.
--------------------------------------------------------------------*/
  int i, j, k;
  double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,ru1 ,uu ,vv ,ww ,ac ,ac2inv ,r1 ,r2 ,r3 ,r4 ,t1 ,t2 ,t3 ,c2 ,bt ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load local flow quantities */
        ru1 = rho_i[i][j][k];
        uu = us[i][j][k];
        vv = vs[i][j][k];
        ww = ws[i][j][k];
        ac = speed[i][j][k];
        ac2inv = ainv[i][j][k]*ainv[i][j][k];
        r1 = rhs[0][i][j][k];
        r2 = rhs[1][i][j][k];
        r3 = rhs[2][i][j][k];
        r4 = rhs[3][i][j][k];
        r5 = rhs[4][i][j][k];
        /* apply the 5x5 block inverse analytically */
        t1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 -
                             vv*r3 - ww*r4 + r5 );
        t2 = bt * ru1 * ( uu * r1 - r2 );
        t3 = ( bt * ru1 * ac ) * t1;
        rhs[0][i][j][k] = r1 - t1;
        rhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );
        rhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );
        rhs[3][i][j][k] = - t2 + t3;
        rhs[4][i][j][k] = t2 + t3;
      }
    }
  }
}

/*--------------------------------------------------------------------
c tzetar: block-diagonal matrix-vector multiplication (inverse of the
c zeta-direction characteristic transform); body continues below.
--------------------------------------------------------------------*/
static void tzetar(void) {
  int
      i, j, k; /* (completes the 'int' declaration begun on the previous line) */
  double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5,
    btuz, acinv, ac2u, uzik1;
#pragma omp for private(i ,j ,k ,t1 ,t2 ,t3 ,ac ,xvel ,yvel ,zvel ,r1 ,r2 ,r3 ,r4 ,r5 ,btuz ,ac2u ,uzik1 )
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i )
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,xvel ,yvel ,zvel ,ac ,acinv ,r1 ,r2 ,r3 ,r4 ,r5 ,uzik1 ,btuz ,t1 ,t2 ,t3 ,bt ,c2iv ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load local flow quantities */
        xvel = us[i][j][k];
        yvel = vs[i][j][k];
        zvel = ws[i][j][k];
        ac = speed[i][j][k];
        acinv = ainv[i][j][k];
        ac2u = ac*ac;
        r1 = rhs[0][i][j][k];
        r2 = rhs[1][i][j][k];
        r3 = rhs[2][i][j][k];
        r4 = rhs[3][i][j][k];
        r5 = rhs[4][i][j][k];
        uzik1 = u[0][i][j][k];
        btuz = bt * uzik1;
        /* recombine characteristic components back into rhs */
        t1 = btuz*acinv * (r4 + r5);
        t2 = r3 + t1;
        t3 = btuz * (r4 - r5);
        rhs[0][i][j][k] = t2;
        rhs[1][i][j][k] = -uzik1*r2 + xvel*t2;
        rhs[2][i][j][k] = uzik1*r1 + yvel*t2;
        rhs[3][i][j][k] = zvel*t2 + t3;
        rhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) +
          qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;
      }
    }
  }
}

/*--------------------------------------------------------------------
c verify: compare the residual and solution-error norms against the
c class-specific reference values; sets *class to 'S','W','A','B','C'
c (or 'U' for an unknown size/step combination), sets *verified, and
c prints the comparison report.
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *class, boolean *verified) {
  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

  /* relative tolerance for the norm comparisons */
  epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
--------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

  /* residual norms are reported per unit time step */
#pragma omp parallel for firstprivate(dt ,m )
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  /* default: unknown class, assume success until a check fails */
  *class = 'U';
  *verified = TRUE;
#pragma omp parallel for firstprivate(m )
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
--------------------------------------------------------------------*/
  if ( grid_points[0] == 12
       && grid_points[1] == 12
       && grid_points[2] == 12
       && no_time_steps == 100) {

    *class = 'S';
    dtref = 1.5e-2;

    /* Reference values of RMS-norms of residual */
    xcrref[0] = 2.7470315451339479e-02;
    xcrref[1] = 1.0360746705285417e-02;
    xcrref[2] = 1.6235745065095532e-02;
    xcrref[3] = 1.5840557224455615e-02;
    xcrref[4] = 3.4849040609362460e-02;

    /* Reference values of RMS-norms of solution error */
    xceref[0] = 2.7289258557377227e-05;
    xceref[1] = 1.0364446640837285e-05;
    xceref[2] = 1.6154798287166471e-05;
    xceref[3] = 1.5750704994480102e-05;
    xceref[4] = 3.4177666183390531e-05;

/*--------------------------------------------------------------------
c reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 36
             && grid_points[1] == 36
             && grid_points[2] == 36
             && no_time_steps == 400) {

    *class = 'W';
    dtref = 1.5e-3;

    /* Reference values of RMS-norms of residual */
    xcrref[0] = 0.1893253733584e-02;
    xcrref[1] = 0.1717075447775e-03;
    xcrref[2] = 0.2778153350936e-03;
    xcrref[3] = 0.2887475409984e-03;
    xcrref[4] = 0.3143611161242e-02;

    /* Reference values of RMS-norms of solution error */
    xceref[0] = 0.7542088599534e-04;
    xceref[1] = 0.6512852253086e-05;
    xceref[2] = 0.1049092285688e-04;
    xceref[3] = 0.1128838671535e-04;
    xceref[4] = 0.1212845639773e-03;

/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 64
             && grid_points[1] == 64
             && grid_points[2] == 64
             && no_time_steps == 400 ) {

    *class = 'A';
    dtref = 1.5e-3;

    /* Reference values of RMS-norms of residual */
    xcrref[0] = 2.4799822399300195;
    xcrref[1] = 1.1276337964368832;
    xcrref[2] = 1.5028977888770491;
    xcrref[3] = 1.4217816211695179;
    xcrref[4] = 2.1292113035138280;

    /* Reference values of RMS-norms of solution error */
    xceref[0] = 1.0900140297820550e-04;
    xceref[1] = 3.7343951769282091e-05;
    xceref[2] = 5.0092785406541633e-05;
    xceref[3] = 4.7671093939528255e-05;
    xceref[4] = 1.3621613399213001e-04;

/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 400 time steps,
c with DT = 1.0d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 102
             && grid_points[1] == 102
             && grid_points[2] == 102
             && no_time_steps == 400) {

    *class = 'B';
    dtref = 1.0e-3;

    /* Reference values of RMS-norms of residual */
    xcrref[0] = 0.6903293579998e+02;
    xcrref[1] = 0.3095134488084e+02;
    xcrref[2] = 0.4103336647017e+02;
    xcrref[3] = 0.3864769009604e+02;
    xcrref[4] = 0.5643482272596e+02;

    /* Reference values of RMS-norms of solution error */
    xceref[0] = 0.9810006190188e-02;
    xceref[1] = 0.1022827905670e-02;
    xceref[2] = 0.1720597911692e-02;
    xceref[3] = 0.1694479428231e-02;
    xceref[4] = 0.1847456263981e-01;

/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 400 time steps,
c with DT = 0.67d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 162
             && grid_points[1] == 162
             && grid_points[2] == 162
             && no_time_steps == 400) {

    *class = 'C';
    dtref = 0.67e-3;

    /* Reference values of RMS-norms of residual */
    xcrref[0] = 0.5881691581829e+03;
    xcrref[1] = 0.2454417603569e+03;
    xcrref[2] = 0.3293829191851e+03;
    xcrref[3] = 0.3081924971891e+03;
    xcrref[4] = 0.4597223799176e+03;

    /* Reference values of RMS-norms of solution error */
    xceref[0] = 0.2598120500183e+00;
    xceref[1] = 0.2590888922315e-01;
    xceref[2] = 0.5132886416320e-01;
    xceref[3] = 0.4806073419454e-01;
    xceref[4] = 0.5483377491301e+00;

  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference
c values (relative differences, used for the epsilon test below).
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(m )
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
  if (*class != 'U') {
    printf(" Verification being performed for class %1c\n", *class);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    if (fabs(dt-dtref) > epsilon) {
      /* a mismatched DT invalidates the reference data */
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m,xce[m],xceref[m],xcedif[m]);
    }
  }

  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
  {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the x-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the x-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
  int i, j, k, n, i1, i2, m;
  double fac1, fac2;

/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
  lhsx();

/*--------------------------------------------------------------------
c perform the Thomas algorithm; first, FORWARD ELIMINATION
c (components 0..2 share the first pentadiagonal block, n = 0)
--------------------------------------------------------------------*/
  n = 0;
  for (i = 0; i <= grid_points[0]-3; i++) {
    i1 = i + 1;
    i2 = i + 2;
#pragma omp parallel for
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(m ,k ,fac1 ,j ,i )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* normalize row i, then eliminate its coupling from rows i+1, i+2 */
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        }
        lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
          lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
        lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
          lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i1][j][k] = rhs[m][i1][j][k] -
            lhs[n+1][i1][j][k]*rhs[m][i][j][k];
        }
        lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
          lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
        lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
          lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i2][j][k] = rhs[m][i2][j][k] -
            lhs[n+0][i2][j][k]*rhs[m][i][j][k];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
  i = grid_points[0]-2;
  i1 = grid_points[0]-1;
#pragma omp parallel for
  for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,i ,i1 ,j )
    for (k = 1; k <= grid_points[2]-2; k++) {
      fac1 = 1.0/lhs[n+2][i][j][k];
      lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
        lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
      lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
        lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i1][j][k] = rhs[m][i1][j][k] -
          lhs[n+1][i1][j][k]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
      fac2 = 1./lhs[n+2][i1][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
      }
    }
  }

/*--------------------------------------------------------------------
c do the u+c and the u-c factors (m = 3,4 each use their own lhs block)
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(j ,k ,i ,fac1 ,i1 ,m ,n )
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (i = 0; i <= grid_points[0]-3; i++) {
      i1 = i + 1;
      i2 = i + 2;
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          fac1 = 1./lhs[n+2][i][j][k];
          lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
          lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
          lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
            lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
          lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
            lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
          rhs[m][i1][j][k] = rhs[m][i1][j][k] -
            lhs[n+1][i1][j][k]*rhs[m][i][j][k];
          lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
            lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
          lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
            lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
          rhs[m][i2][j][k] = rhs[m][i2][j][k] -
            lhs[n+0][i2][j][k]*rhs[m][i][j][k];
        }
      }
    }

/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
    i = grid_points[0]-2;
    i1 = grid_points[0]-1;
#pragma omp parallel for
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
          lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
        lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
          lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
        rhs[m][i1][j][k] = rhs[m][i1][j][k] -
          lhs[n+1][i1][j][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
        fac2 = 1./lhs[n+2][i1][j][k];
        rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
      }
    }
  }

/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
  i = grid_points[0]-2;
  i1 = grid_points[0]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp parallel for
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m )
      for (k = 1; k <= grid_points[2]-2; k++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i1][j][k];
      }
    }
  }
#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m )
  for (m = 3; m < 5; m++) {
#pragma omp parallel for
    for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,i1 ,i ,m )
      for (k = 1; k <= grid_points[2]-2; k++) {
        n = (m-3+1)*5;
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i1][j][k];
      }
    }
  }

/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
  n = 0;
  for (i = grid_points[0]-3; i >= 0; i--) {
    i1 = i + 1;
    i2 = i + 2;
#pragma omp parallel for
    for (m = 0; m < 3; m++) {
#pragma omp parallel for firstprivate(j ,k ,m ,i )
      for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp parallel for firstprivate(j ,k ,m ,i )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
            lhs[n+4][i][j][k]*rhs[m][i2][j][k];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(i ,j ,k ,m ,n )
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (i = grid_points[0]-3; i >= 0; i--) {
      i1 = i + 1;
      i2 = i + 2;
#pragma omp parallel for
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
            lhs[n+4][i][j][k]*rhs[m][i2][j][k];
        }
      }
    }
  }
  }

/*--------------------------------------------------------------------
c Do the block-diagonal inversion
--------------------------------------------------------------------*/
  ninvr();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
  {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the y-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the y-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
  int i, j, k, n, j1, j2, m;
  double fac1, fac2;

/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
  lhsy();

  /* components 0..2 share the first pentadiagonal block, n = 0 */
  n = 0;
  for (j = 0; j <= grid_points[1]-3; j++) {
    j1 = j + 1;
    j2 = j + 2;
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(m ,k ,fac1 ,i ,j )
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* normalize row j, then eliminate it from rows j+1 and j+2 */
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        }
        lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
          lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
        lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
          lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j1][k] = rhs[m][i][j1][k] -
            lhs[n+1][i][j1][k]*rhs[m][i][j][k];
        }
        lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
          lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
        lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
          lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
        for (m = 0; m < 3; m++) {
          rhs[m][i][j2][k] = rhs[m][i][j2][k] -
            lhs[n+0][i][j2][k]*rhs[m][i][j][k];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
  j = grid_points[1]-2;
  j1 = grid_points[1]-1;
#pragma omp parallel for
  for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(m ,k ,fac1 ,fac2 ,j ,j1 ,i )
    for (k = 1; k <= grid_points[2]-2; k++) {
      fac1 = 1./lhs[n+2][i][j][k];
      lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
        lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
      lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
        lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j1][k] = rhs[m][i][j1][k] -
          lhs[n+1][i][j1][k]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
      fac2 = 1./lhs[n+2][i][j1][k];
      for (m = 0; m < 3; m++) {
        rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
      }
    }
  }

/*--------------------------------------------------------------------
c do the u+c and the u-c factors
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(i ,k ,j ,fac1 ,j1 ,m ,n )
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (j = 0; j <= grid_points[1]-3; j++) {
      j1 = j + 1;
      j2 = j + 2;
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          fac1 = 1./lhs[n+2][i][j][k];
          lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
          lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
          rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
          lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
            lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
          lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
            lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
          rhs[m][i][j1][k] = rhs[m][i][j1][k] -
            lhs[n+1][i][j1][k]*rhs[m][i][j][k];
          lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
            lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
          lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
            lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
          rhs[m][i][j2][k] = rhs[m][i][j2][k] -
            lhs[n+0][i][j2][k]*rhs[m][i][j][k];
        }
      }
    }

/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
    j = grid_points[1]-2;
    j1 = grid_points[1]-1;
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        fac1 = 1./lhs[n+2][i][j][k];
        lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
        lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
        rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
        lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
          lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
        lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
          lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
        rhs[m][i][j1][k] = rhs[m][i][j1][k] -
          lhs[n+1][i][j1][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
        fac2 = 1./lhs[n+2][i][j1][k];
        rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
      }
    }
  }

/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
  j = grid_points[1]-2;
  j1 = grid_points[1]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m )
      for (k = 1; k <= grid_points[2]-2; k++) {
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i][j1][k];
      }
    }
  }
#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m )
  for (m = 3; m < 5; m++) {
#pragma omp parallel for
    for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m )
      for (k = 1; k <= grid_points[2]-2; k++) {
        n = (m-3+1)*5;
        rhs[m][i][j][k] = rhs[m][i][j][k] -
          lhs[n+3][i][j][k]*rhs[m][i][j1][k];
      }
    }
  }

/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
  n = 0;
#pragma omp parallel for firstprivate(i ,k ,j1 ,j ,m )
  for (m = 0; m < 3; m++) {
    for (j = grid_points[1]-3; j >= 0; j--) {
      j1 = j + 1;
      j2 = j + 2;
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
#pragma omp parallel for firstprivate(i ,j ,k ,m )
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
            lhs[n+4][i][j][k]*rhs[m][i][j2][k];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
#pragma omp parallel for firstprivate(j ,i ,j2 ,k ,m ,n )
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (j = grid_points[1]-3; j >= 0; j--) {
      j1 = j + 1;
      j2 = j1 + 1;  /* equivalent to j + 2 */
#pragma omp parallel for
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          rhs[m][i][j][k] = rhs[m][i][j][k] -
            lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
            lhs[n+4][i][j][k]*rhs[m][i][j2][k];
        }
      }
    }
  }
  }

  /* block-diagonal inversion for the y-sweep */
  pinvr();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
  {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the z-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the z-lines.
Boundary conditions are non-periodic c-------------------------------------------------------------------*/ int i, j, k, n, k1, k2, m; double fac1, fac2; /*-------------------------------------------------------------------- c FORWARD ELIMINATION c-------------------------------------------------------------------*/ lhsz(); n = 0; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(m ,j ,k ,fac1 ,i ) for (j = 1; j <= grid_points[1]-2; j++) { for (k = 0; k <= grid_points[2]-3; k++) { k1 = k + 1; k2 = k + 2; fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+3][i][j][k]; lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n+1][i][j][k1]*rhs[m][i][j][k]; } lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] - lhs[n+0][i][j][k2]*lhs[n+3][i][j][k]; lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] - lhs[n+0][i][j][k2]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n+0][i][j][k2]*rhs[m][i][j][k]; } } } } /*-------------------------------------------------------------------- c The last two rows in this grid block are a bit different, c since they do not have two more rows available for the c elimination of off-diagonal entries c-------------------------------------------------------------------*/ k = grid_points[2]-2; k1 = grid_points[2]-1; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(m ,j ,fac1 ,fac2 ,k ,k1 ,i ) for (j = 1; j <= grid_points[1]-2; j++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; } 
lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+3][i][j][k]; lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n+1][i][j][k1]*rhs[m][i][j][k]; } /*-------------------------------------------------------------------- c scale the last row immediately c-------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i][j][k1]; for (m = 0; m < 3; m++) { rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1]; } } } /*-------------------------------------------------------------------- c do the u+c and the u-c factors c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) for (m = 3; m < 5; m++) { n = (m-3+1)*5; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k ,fac1 ,k1 ,m ,n ) for (j = 1; j <= grid_points[1]-2; j++) { for (k = 0; k <= grid_points[2]-3; k++) { k1 = k + 1; k2 = k + 2; fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+3][i][j][k]; lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+4][i][j][k]; rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n+1][i][j][k1]*rhs[m][i][j][k]; lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] - lhs[n+0][i][j][k2]*lhs[n+3][i][j][k]; lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] - lhs[n+0][i][j][k2]*lhs[n+4][i][j][k]; rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n+0][i][j][k2]*rhs[m][i][j][k]; } } } /*-------------------------------------------------------------------- c And again the last two rows separately c-------------------------------------------------------------------*/ k = grid_points[2]-2; k1 = grid_points[2]-1; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { for (j = 1; j 
<= grid_points[1]-2; j++) { fac1 = 1./lhs[n+2][i][j][k]; lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k]; lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k]; rhs[m][i][j][k] = fac1*rhs[m][i][j][k]; lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+3][i][j][k]; lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] - lhs[n+1][i][j][k1]*lhs[n+4][i][j][k]; rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n+1][i][j][k1]*rhs[m][i][j][k]; /*-------------------------------------------------------------------- c Scale the last row immediately (some of this is overkill c if this is the last cell) c-------------------------------------------------------------------*/ fac2 = 1./lhs[n+2][i][j][k1]; rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1]; } } } /*-------------------------------------------------------------------- c BACKSUBSTITUTION c-------------------------------------------------------------------*/ k = grid_points[2]-2; k1 = grid_points[2]-1; n = 0; for (m = 0; m < 3; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ) for (j = 1; j <= grid_points[1]-2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j][k1]; } } } #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) for (m = 3; m < 5; m++) { n = (m-3+1)*5; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k1 ,k ,m ,n ) for (j = 1; j <= grid_points[1]-2; j++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j][k1]; } } } /*-------------------------------------------------------------------- c Whether or not this is the last processor, we always have c to complete the back-substitution c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c The first three factors c-------------------------------------------------------------------*/ n = 0; #pragma omp parallel for 
firstprivate(i ,j ,k1 ,k ,m ,n ) for (m = 0; m < 3; m++) { #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k ,m ) for (j = 1; j <= grid_points[1]-2; j++) { for (k = grid_points[2]-3; k >= 0; k--) { k1 = k + 1; k2 = k + 2; rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j][k1] - lhs[n+4][i][j][k]*rhs[m][i][j][k2]; } } } } /*-------------------------------------------------------------------- c And the remaining two c-------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) for (m = 3; m < 5; m++) { n = (m-3+1)*5; #pragma omp parallel for for (i = 1; i <= grid_points[0]-2; i++) { #pragma omp parallel for firstprivate(i ,j ,k ,m ,n ) for (j = 1; j <= grid_points[1]-2; j++) { for (k = grid_points[2]-3; k >= 0; k--) { k1 = k + 1; k2 = k + 2; rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n+3][i][j][k]*rhs[m][i][j][k1] - lhs[n+4][i][j][k]*rhs[m][i][j][k2]; } } } } } tzetar(); }
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace.  The transformation matrices are not the standard ones:  the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Map RGB to the complement-based CMY triple, each scaled to [0..1]. */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* CIE XYZ -> LMS cone-response transform (fixed 3x3 matrix). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* RGB -> LMS, routed through XYZ. */
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* RGB -> CIE L*a*b*, routed through XYZ. */
static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* RGB -> CIE L*u*v*, routed through XYZ. */
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

/*
  RGB -> xyY chromaticity: x,y are X,Y normalized by (X+Y+Z); the
  PerceptibleReciprocal() guard avoids division by zero for black.
*/
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

/* RGB -> YDbDr; Db/Dr are recentered by +0.5 into [0..1]. */
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* RGB -> YIQ; I/Q are recentered by +0.5 into [0..1]. */
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

/* RGB -> YPbPr (BT.601 luma weights); Pb/Pr recentered by +0.5. */
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

/* RGB -> YCbCr: identical weights to YPbPr in this normalized form. */
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

/* RGB -> YUV; U/V recentered by +0.5 into [0..1]. */
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

/*
  Convert the image pixels (and colormap, for PseudoClass) from sRGB to
  the requested colorspace in place.  Per-pixel conversions are used for
  most target spaces; OHTA/Rec601YCbCr/Rec709YCbCr/YCC fall through to
  precomputed 3x3 lookup tables (x_map/y_map/z_map) applied at the end.
  Returns MagickTrue on success; on failure the row loops latch
  status=MagickFalse and finish without early exit (OpenMP-safe).
*/
static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          /* materialize the colormap into pixels before direct edits */
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      /*
        Transform image from sRGB to target colorspace: per-pixel
        conversion, with the resulting triple stored back into the
        red/green/blue channels scaled to [0..QuantumRange].
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;

          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          switch (colorspace)
          {
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            /* LCH is treated as LCHab */
            case LCHColorspace:
            case LCHabColorspace:
            {
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace (Cineon-style log encoding).
        Defaults may be overridden per-image via the "gamma",
        "film-gamma", "reference-black" and "reference-white" properties.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* linearize each channel, then map through the log LUT */
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;

          red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB (remove the sRGB
        transfer curve from each channel).
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;

          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables (table-driven path for the remaining spaces).
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y = 0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr= 0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y = 0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr= 0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y = 0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2= 0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
        The two loops implement the piecewise (linear/non-linear)
        PhotoCD encoding with the knee at 0.018.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        y_map[i].x=0.010566*i;
        z_map[i].x=0.002052*i;
        x_map[i].y=(-0.003296)*i;
        y_map[i].y=(-0.006471)*i;
        z_map[i].y=0.009768*i;
        x_map[i].z=0.009410*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables (identity transform).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        y_map[i].x=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        x_map[i].y=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        z_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB using the precomputed tables.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image: apply the 3x3 table transform to
        every pixel.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        register unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_sRGBTransformImage)
#endif
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      register unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: transform the colormap only, then
        sync pixels from it.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Set the colorspace tag of the image (no pixel conversion) and reset
  gamma, rendering intent and chromaticity to values consistent with the
  new colorspace; finally re-syncs the pixel cache.
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;  /* default: assume a gamma-encoded space */
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /* luminance-based gray is linear; other gray stays gamma-encoded */
      if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
          (image->intensity == Rec709LuminancePixelIntensityMethod))
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear spaces */
    else
      {
        /* non-linear color space: sRGB-like primaries and white point */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have
the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  /*
    Downgrade an sRGB-compatible image to the GRAY colorspace when every
    pixel is achromatic.  Returns MagickTrue only if the image was (or now
    is) gray and the pixel cache synchronized successfully.
  */
  const char
    *auto_grayscale;

  ImageType
    gray_type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Already gray: nothing to change. */
  if (IsImageGray(image))
    return(MagickTrue);
  /* Only sRGB-compatible colorspaces are candidates for auto-grayscale. */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out via the colorspace:auto-grayscale property. */
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  /* Scan the pixels; UndefinedType means at least one pixel is chromatic. */
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;  /* BilevelType or GrayscaleType */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e t   I m a g e   M o n o c h r o m e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  /*
    Downgrade an sRGB-compatible image to bi-level GRAY when every pixel is
    either pure black or pure white.  Returns MagickTrue only if the image
    was (or now is) bi-level and the pixel cache synchronized successfully.
  */
  const char
    *auto_grayscale;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Already bi-level: nothing to change. */
  if (image->type == BilevelType)
    return(MagickTrue);
  /* Only sRGB-compatible colorspaces are candidates for auto-grayscale. */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /* Honor an explicit opt-out via the colorspace:auto-grayscale property. */
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  /* Scan the pixels; fail unless every one is 0 or QuantumRange. */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s f o r m   I m a g e   C o l o r s p a c e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  /*
    Convert the image's pixel data to `colorspace`.  The conversion is
    pivoted through sRGB: first transform the source colorspace to sRGB
    (TransformsRGBImage), then sRGB to the target (sRGBTransformImage).
  */
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Same colorspace: just re-tag (SetImageColorspace returns immediately
    in this case) -- no pixel transform needed.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Non-linear GRAY to sRGB: only re-tag the image.  NOTE(review): this
    presumably relies on the gray pixels already being gamma-encoded, so a
    pixel transform would double-apply the gamma -- confirm against
    TransformsRGBImage()'s GRAYColorspace case.
  */
  if ((image->colorspace == GRAYColorspace) && (image->gamma != 1.0) &&
      (colorspace == sRGBColorspace))
    return(SetImageColorspace(image,colorspace,exception));
  /* Undefined target: tag only, leave pixels untouched. */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
    Any embedded color profiles no longer describe the transformed pixels,
    so drop them before converting.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  /* Pivot to sRGB only if the source is not already an sRGB variant. */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     T r a n s f o r m s R G B I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o exception: return any errors or warnings in this structure. % */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,double *red,double *green,double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 1.4019995886561440468*(Pr-0.5)); 
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 
0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 
0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 
0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 
0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 
0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 
0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum 
*) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) #endif proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) 
SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
vdi_fmt_plug.c
/* VirtualBox (VDI) volume support to John The Ripper * * Written by JimF <jfoug at openwall.net> in 2015. No copyright * is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the * public domain is deemed null and void, then the software is * Copyright (c) 2015 JimF and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * * information about this algorithm taken from: * http://www.sinfocol.org/archivos/2015/07/VBOXDIECracker.phps */ #include "arch.h" #if FMT_EXTERNS_H extern struct fmt_main fmt_vdi; #elif FMT_REGISTERS_H john_register_one(&fmt_vdi); #else #include "aes_xts.h" #include <string.h> #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "crc32.h" #include "johnswap.h" #include "base64_convert.h" #include "pbkdf2_hmac_sha256.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 16 #else #define OMP_SCALE 4 #endif // __MIC__ #endif // OMP_SCALE #endif // _OPENMP #include "memdbg.h" #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct vdi_salt) #define SALT_ALIGN 4 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define MAX_SALT_LEN 32 #define MAX_KEY_LEN 64 #define FORMAT_LABEL "vdi" #define FORMAT_NAME "VirtualBox-VDI AES_XTS" #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME " + AES_XTS" #if SSE_GROUP_SZ_SHA256 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1]; static unsigned char (*crypt_out)[MAX_SALT_LEN]; #define FORMAT_TAG "$vdi$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) static struct fmt_tests tests[] = { // The 'jtr' test hashed 
were made with VirtualBox. The others were made with pass_gen.pl {"$vdi$aes-xts256$sha256$2000$2000$64$32$709f6df123f1ccb126ea1f3e565beb78d39cafdc98e0daa2e42cc43cef11f786$0340f137136ad54f59f4b24ef0bf35240e140dfd56bbc19ce70aee6575f0aabf$0a27e178f47a0b05a752d6e917b89ef4205c6ae76705c34858390f8afa6cf03a45d98fab53b76d8d1c68507e7810633db4b83501a2496b7e443eccb53dbc8473$7ac5f4ad6286406e84af31fd36881cf558d375ae29085b08e6f65ebfd15376ca", "jtr"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$d72ee0aecd496b084117bb8d87f5e37de71973518a2ef992c895907a09b73b83$afb33e56a7f81b1e3db70f599b62ecf3d223405abb63bcf569bb29acab9c81a6$3b3769fd3cfaf8e11f67fdc9d54aed8c8962a769f3f66cb2b9cb8700c01a66e6b1c996fdee9727188c765bde224047b8ced7a9b5f5381e7ad7271a9cbf049fde$1c5bca64cbedd76802eddc3e6ffd834e8c1f1ff1157de6ae6feb3740051e2cfa", "password"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$a4e4480927153ecbb7509afb8d49468e62c8bb22aaab458f4115bff63364de41$c69605220d1ed03618f0236a88e225db1ec69e7a95dbe63ee00778cc8b91424e$0a1de9c85452fafd19ceb0821a115c7fec6fab4ef51fc57fabc25bf973417684a78683267513923f88231a6efd2442ce9279f2a5614d4cfcb930b5ef413f34c3$d79ea5522ad79fc409bbcd1e8a2bb75e16a53e1eef940b4fe954cee1c2491fd2", "ripper"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$450ce441592003821931e73ea314dcd0effff1b74b61a8fc4046573d0f448057$18c48e3d7677bc9471607cec83d036b963f23f7bb16f09ea438395b61dcf14d5$c4893bce14fa3a1f915004b9ec0fd6a7215ddebdd2ca4bc2b4ec164253c2f2319685a8f8245ec8e2d9e7a53c6aec5fd2d4ca7ba510ffc7456a72285d40ce7d35$793e58317b9bf6318d1b4cef1e05f5a8579a50fb7efde884ea68b096b7043aad", "john"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$472476df7d16f80d612d4c9ff35678a2011605dc98b76b6d78632859c259d5d0$aa89f9bea1139da6ace97e13c823d713030fda0c8c55ad2fcea358746cc0b4cc$507aaf7c9e00b492042072a17b3975fc88e30e1d5927e63cb335c630b7b873e4c9af2df63c95b42896e15bb33c37c9f572a65f97441b3707ce5d81c521dfd30e$111004a8d9167b55ff5db510cc136f2bceacf4a9f50807742e2bbc110847174e", "really long password with ! stuff!!! 
;)"}, // some aes-128 samples They run exactly same speed as the AES-256 hashes. {"$vdi$aes-xts128$sha256$2000$2000$32$32$d3fd2bb27734f25918ac726717b192091253441c4bc71a814d0a6483e73325ea$ef560858b4c068bd8d994cdf038f51cb1b9f59335d72cb874e79a13c5b6aa84a$79ff000f7638d39b0d02ad08dfcede8740087e334e98022465a380bdf78fff13$302f4c4f58c0dee9676dfdaf3ada9e3d7ec4b5bfc7e6565c941f4ec7337368d4", "jtr"}, {"$vdi$aes-xts128$sha256$2000$2000$32$32$16894e7496bac97bc467faa3efe5a3ba009e1591990c9422e4352bfb39ead4d6$00780af3703680b63239b61d0395e9ff673ee843d7a77d61541e0fdc096c49d1$72434a81a27bb1cd85be529600c3620e4eeed45d12f8ef337cc51c040306be7d$4a5b2129577289a8a0f6a93d7a578cd248d158bc70d6ab89f5ccf31704812e85", "blowhard"}, {"$vdi$aes-xts128$sha256$2000$2000$32$32$4e9d103c944479a4e2b2e33d4757e11fc1a7263ba3b2e99d9ad4bc9aeb7f9337$ade43b6eb1d878f0a5532070fb81697a8164ff7b9798e35649df465068ae7e81$f1e443252c872e305eda848d05676a20af8df405262984b39baf0f0aa1b48247$2601e9e08d19ca20745a6a33f74259bdca06014455370b0bb6b79eb0c5e60581", "foobar"}, {NULL} }; static struct vdi_salt { unsigned char salt1[MAX_SALT_LEN]; unsigned char salt2[MAX_SALT_LEN]; unsigned char encr[MAX_KEY_LEN]; int crypt_type; // 1, 256, 384, 512 for the pbkdf2 algo (currently ONLY 256 implemented, so that is all we handle right now). 
	int evp_type;   // 128 or 256 for AES-128XTS or AES-256XTS
	int rounds1;    // PBKDF2 iteration count for the first (salt1) derivation
	int rounds2;    // PBKDF2 iteration count for the second (salt2) derivation
	int keylen;     // length in bytes of the encrypted key blob (<= MAX_KEY_LEN)
	int saltlen;    // length in bytes of salt1/salt2/final result (<= MAX_SALT_LEN)
} *psalt;

/*
 * init: scale the per-crypt key counts for OpenMP and allocate the
 * per-candidate plaintext and result buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	key_buffer = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_buffer));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}

/* done: release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(key_buffer);
	MEM_FREE(crypt_out);
}

/*
 * valid: structural validation of a "$vdi$..." ciphertext line.
 * Checks every '$'-separated field: cipher name, kdf name, two decimal
 * iteration counts, decimal key/salt lengths, then four lowercase-hex
 * fields whose lengths must match the declared key/salt lengths.
 * Returns 1 when well-formed, 0 otherwise.
 *
 * NOTE(review): only upper bounds are enforced on keylen/saltlen; a zero
 * length would pass — presumably harmless downstream, but worth confirming.
 */
static int valid(char* ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	int keylen;
	int saltlen;
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext + FORMAT_TAG_LEN);
	keeptr = ctcopy;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* decr type*/
		goto err;
	if (strcmp(p, "aes-xts256") && strcmp(p, "aes-xts128"))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2 algo */
		goto err;
	//if (strcmp(p, "sha1") && strcmp(p, "sha256") && strcmp(p, "sha384") && strcmp(p, "sha512"))
	if (strcmp(p, "sha256"))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-1 iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-2 iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* key length */
		goto err;
	if (!isdec(p))
		goto err;
	keylen = atoi(p);
	if(keylen > MAX_KEY_LEN)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if(saltlen > MAX_SALT_LEN)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt1 */
		goto err;
	if(strlen(p) != saltlen * 2)
		goto err;
	if(!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt2 */
		goto err;
	if(strlen(p) != saltlen * 2)
		goto err;
	if(!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* encr_key */
		goto err;
	if(strlen(p) != keylen * 2)
		goto err;
	if(!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* final_result */
		goto err;
	if(strlen(p) != saltlen * 2)
		goto err;
	if(!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) != NULL)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* set_salt: install the parsed salt for the next crypt_all() batch. */
static void set_salt(void *salt)
{
	psalt = salt;
}

/*
 * get_salt: parse an already-validated ciphertext into a struct vdi_salt.
 * Hex fields are converted to raw bytes via base64_convert().
 * Returns a pointer into a static, 4-byte-aligned buffer (reused on each
 * call; JtR copies the salt, so that is the expected contract here).
 */
static void* get_salt(char *ciphertext)
{
	static char buf[sizeof(struct vdi_salt)+4];
	struct vdi_salt *s = (struct vdi_salt *)mem_align(buf, 4);
	char *ctcopy, *keeptr;
	char *p;

	memset(buf, 0, sizeof(buf));
	ctcopy = strdup(ciphertext + FORMAT_TAG_LEN);
	keeptr = ctcopy;

	p = strtokm(ctcopy, "$"); /* decr type*/
	s->evp_type = !strcmp(p, "aes-xts128") ? 128 : 256;
	p = strtokm(NULL, "$"); /* pbkdf2 algo */
	s->crypt_type = 256; /* right now, we ONLY handle pbkdf2-sha256 */
	p = strtokm(NULL, "$"); /* pbkdf2-1 iterations */
	s->rounds1 = atoi(p);
	p = strtokm(NULL, "$"); /* pbkdf2-2 iterations */
	s->rounds2 = atoi(p);
	p = strtokm(NULL, "$"); /* key length */
	s->keylen = atoi(p);
	p = strtokm(NULL, "$"); /* salt length */
	s->saltlen = atoi(p);
	p = strtokm(NULL, "$"); /* salt1 */
	base64_convert(p, e_b64_hex, s->saltlen*2, s->salt1, e_b64_raw, s->saltlen, 0);
	p = strtokm(NULL, "$"); /* salt2 */
	base64_convert(p, e_b64_hex, s->saltlen*2, s->salt2, e_b64_raw, s->saltlen, 0);
	p = strtokm(NULL, "$"); /* encr_key */
	base64_convert(p, e_b64_hex, s->keylen*2, s->encr, e_b64_raw, s->keylen, 0);
	MEM_FREE(keeptr);
	return s;
}

/*
 * crypt_all: for each candidate password,
 *   1. PBKDF2-HMAC-SHA256(password, salt1, rounds1) -> AES key
 *   2. AES-XTS-decrypt the stored encrypted key blob with that key
 *   3. PBKDF2-HMAC-SHA256(decrypted blob, salt2, rounds2) -> crypt_out
 * The SIMD path (SSE_GROUP_SZ_SHA256) processes a group of candidates per
 * iteration; the scalar path does one at a time.  Parallelized across
 * candidates with OpenMP.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	int inc=1;
	const int count = *pcount;

#if SSE_GROUP_SZ_SHA256
	inc = SSE_GROUP_SZ_SHA256;
#endif

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(i = 0; i < count; i += inc) {
		unsigned char key[MAX_KEY_LEN];
#if SSE_GROUP_SZ_SHA256
		unsigned char Keys[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];
		unsigned char Decr[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];
#else
		unsigned char Decr[1][MAX_KEY_LEN];
		int ksz = strlen((char *)key_buffer[i]);
#endif
		int j;
#if SSE_GROUP_SZ_SHA256
		int lens[SSE_GROUP_SZ_SHA256];
		unsigned char *pin[SSE_GROUP_SZ_SHA256];
		union {
			unsigned char *pout[SSE_GROUP_SZ_SHA256];
			unsigned char *poutc;
		} x;
		for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
			lens[j] = strlen((char*)(key_buffer[i+j]));
			pin[j] = key_buffer[i+j];
			x.pout[j] = Keys[j];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt1, psalt->saltlen, psalt->rounds1, &(x.poutc), psalt->keylen, 0);
#else
		pbkdf2_sha256((const unsigned char*)key_buffer[i], ksz, psalt->salt1, psalt->saltlen, psalt->rounds1, key, psalt->keylen, 0);
#endif
		for (j = 0; j < inc; ++j) {
#if SSE_GROUP_SZ_SHA256
			memcpy(key, Keys[j], sizeof(key));
#endif
			// Try to decrypt using AES
			AES_XTS_decrypt(key, Decr[j], psalt->encr, psalt->keylen, psalt->evp_type);
		}
#if SSE_GROUP_SZ_SHA256
		for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
			lens[j] = psalt->keylen;
			pin[j] = Decr[j];
			x.pout[j] = crypt_out[i+j];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt2, psalt->saltlen, psalt->rounds2, &(x.poutc), psalt->saltlen, 0);
#else
		pbkdf2_sha256(Decr[0], psalt->keylen, psalt->salt2, psalt->saltlen, psalt->rounds2, crypt_out[i], psalt->saltlen, 0);
#endif
	}
	return count;
}

/* cmp_all: fast scan — any candidate whose first 4 bytes match? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], 4))
			return 1;
	return 0;
}

/* cmp_one: full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* cmp_exact: nothing further to check — cmp_one already compared all bytes. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/*
 * set_key: store a candidate plaintext.
 * NOTE(review): unbounded strcpy — relies on the caller (JtR core) to
 * enforce PLAINTEXT_LENGTH; confirm against the core's contract.
 */
static void set_key(char* key, int index)
{
	strcpy((char*)(key_buffer[index]), key);
}

/* get_key: return the stored candidate plaintext. */
static char *get_key(int index)
{
	return (char*)(key_buffer[index]);
}

/*
 * salt_hash: hash 64 bytes of the salt structure into a SALT_HASH_SIZE bucket.
 * NOTE(review): offset 40 lands 8 bytes into salt2 given the declared layout
 * (salt1[32], salt2[32], encr[64]), so the 64 hashed bytes span salt2 and
 * part of encr — verify the offset is intentional and not meant to be 32.
 */
static int salt_hash(void *salt)
{
	unsigned v=0, i;
	unsigned char *psalt = (unsigned char *)salt;

	psalt += 40; // skips us to the salt stuff.
	for (i = 0; i < 64; ++i) {
		v *= 11;
		v += psalt[i];
	}
	return v & (SALT_HASH_SIZE - 1);
}

/*
 * binary: decode the last '$'-separated hex field (final_result) into raw
 * bytes.  Returns a pointer to a static buffer (reused on every call).
 */
static void *binary(char *ciphertext)
{
	static ARCH_WORD_32 full[MAX_SALT_LEN / 4];
	unsigned char *realcipher = (unsigned char*)full;

	ciphertext = strrchr(ciphertext, '$') + 1;
	base64_convert(ciphertext, e_b64_hex, strlen(ciphertext), realcipher, e_b64_raw, MAX_SALT_LEN, 0);
	return (void*)realcipher;
}

/* Format descriptor wiring the functions above into the JtR core. */
struct fmt_main fmt_vdi = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		"", // BENCHMARK_COMMENT
		-1, // BENCHMARK_LENGTH
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_binop__second_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_uint32 // A.*B function (eWiseMult): GB_AemultB__second_uint32 // A*D function (colscale): GB_AxD__second_uint32 // D*A function (rowscale): GB_DxB__second_uint32 // C+=B function (dense accum): GB_Cdense_accumB__second_uint32 // C+=b function (dense accum): GB_Cdense_accumb__second_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_uint32 // C=A'+scalar GB_bind2nd_tran__second_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = bij #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ 
cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT32 || GxB_NO_SECOND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): auto-generated kernel file — the real loop bodies come from
// the #include'd template files, specialized by the GB_* macros defined above.
// Edit the generator, not this file.  The "(none)" stubs are variants that do
// not exist for the SECOND operator and are kept under #if 0 as generated.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__second_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body generated from the template via the GB_* macros
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__second_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__second_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable; kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__second_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__second_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__second_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated by the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__second_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__second_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // SECOND(x,y) = y, so Ax values are never read; the empty
        // statements below are the generator's elided GB_GETA/GB_GETB
        ;
        ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = aij ;                 \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ; ;                             \
    Cx [pC] = y ;                   \
}

GrB_Info GB_bind2nd_tran__second_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
questao02.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

/*
 * Fills a vector with `entrada` non-negative random ints, then computes the
 * sum of |v[i] * v[i+1]| over consecutive disjoint pairs, first sequentially
 * and then in parallel with OpenMP, printing both sums and wall-clock times.
 *
 * Returns 0 on success, 1 on invalid input or allocation failure.
 */
int main() {
    int *numeros;
    int sum_s = 0, sum_p = 0;
    int entrada, i;
    double t1_s, t2_s, t1_p, t2_p;

    srand(time(0));

    printf("Informe um numero par positivo: ");
    /* Check scanf succeeded, and require a positive even count (the pairing
     * below reads numeros[i+1], so an odd or non-positive size is invalid). */
    if (scanf("%d", &entrada) != 1 || entrada <= 0 || entrada % 2 != 0) {
        printf("Entrada invalida");
        return 1;
    }

    numeros = malloc(entrada * sizeof *numeros);
    if (numeros == NULL) {
        fprintf(stderr, "Falha na alocacao de memoria\n");
        return 1;
    }

    for (i = 0; i < entrada; i++) {
        numeros[i] = abs(rand());
    }

    /* Sequential reference. NOTE: numeros[i] * numeros[i+1] can overflow int
     * for large random values; both versions share this behavior. */
    t1_s = omp_get_wtime();
    for (i = 0; i < entrada; i = i + 2) {
        sum_s += abs(numeros[i] * numeros[i+1]);
    }
    t2_s = omp_get_wtime();

    /* BUG FIX: the original parallel version ASSIGNED each pair's product to
     * a per-thread mult_local inside the loop ("mult_local = ..."), so only
     * the LAST pair handled by each thread reached sum_p, making sum_p
     * disagree with sum_s. An OpenMP reduction accumulates every pair
     * correctly without a critical section. */
    t1_p = omp_get_wtime();
    #pragma omp parallel for schedule(static, 1) reduction(+:sum_p)
    for (i = 0; i < entrada; i = i + 2) {
        sum_p += abs(numeros[i] * numeros[i+1]);
    }
    t2_p = omp_get_wtime();

    printf("\nSequencial");
    printf("\nSoma : %d", sum_s);
    printf("\nTempo: %lf\n", t2_s - t1_s);

    printf("\nParalelo");
    printf("\nSoma : %d", sum_p);
    printf("\nTempo: %lf\n", t2_p - t1_p);

    free(numeros);
    return 0;
}
GB_subassign_19.c
//------------------------------------------------------------------------------
// GB_subassign_19: C(I,J)<!M,repl> += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 19: C(I,J)<!M,repl> += scalar ; using S

// M:           present
// Mask_comp:   true
// C_replace:   true
// accum:       present
// A:           scalar
// S:           constructed

// C: not bitmap
// M: not bitmap

#include "GB_subassign_methods.h"

// Two-phase method: phase 1 walks all of IxJ to turn deleted entries into
// zombies, apply the accumulator, and COUNT the pending tuples per task;
// phase 2 repeats the identical traversal to INSERT those pending tuples.
// The two phases must advance the S and M vector pointers in lockstep so
// that phase-2 inserts land exactly where phase 1 counted them.
GrB_Info GB_subassign_19
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_BinaryOp accum,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_MASK ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 19: C(I,J)<!M,repl> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required. The sparsity of !M cannot be exploited.

    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // phase 1 only counts; the insert happens in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M,repl> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                // the mask is consumed exactly as in phase 1 so that both
                // phases stay in lockstep
                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // already handled in phase 1; just advance S
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
paraloopend.h
/** \file * \ingroup elbeem */ // same as grid loop_end + barrier } // i int i=0; //dummy ADVANCE_POINTERS(2*gridLoopBound); } // j # if COMPRESSGRIDS==1 # if PARALLEL==1 //frintf(stderr," (id=%d k=%d) ",id,k); #pragma omp barrier # endif // PARALLEL==1 # else // COMPRESSGRIDS==1 int i=0; //dummy ADVANCE_POINTERS(mLevel[lev].lSizex*2); # endif // COMPRESSGRIDS==1 } // all cell loop k,j,i #pragma omp critical { if(doReduce) { // synchronize global vars for(size_t j=0; j<calcListFull.size() ; j++) mListFull.push_back( calcListFull[j] ); for(size_t j=0; j<calcListEmpty.size(); j++) mListEmpty.push_back( calcListEmpty[j] ); for(size_t j=0; j<calcListParts.size(); j++) mpParticles->addFullParticle( calcListParts[j] ); if(calcMaxVlen>mMaxVlen) { mMxvx = calcMxvx; mMxvy = calcMxvy; mMxvz = calcMxvz; mMaxVlen = calcMaxVlen; } if(0) {debMsgStd("OMP_CRIT",DM_MSG, "reduce id"<<id<<" curr: "<<mMaxVlen<<"|"<<mMxvx<<","<<mMxvy<<","<<mMxvz<< " calc[ "<<calcMaxVlen<<"|"<<calcMxvx<<","<<calcMxvy<<","<<calcMxvz<<"] " ,4 ); } } } // critical } /* main_region */ //?lobOutstrForce = true;
1.race9.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(dynamic, 4) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i - 1][j - 1]; } // CHECK: Data Race detected // END
tree-vectorizer.h
/* Vectorizer Copyright (C) 2003-2016 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H #include "tree-data-ref.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* Structure to encapsulate information about a group of like instructions to be presented to the target cost model. 
*/ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; gimple *stmt; int misalign; }; typedef vec<stmt_info_for_cost> stmt_vector_for_cost; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* A computation tree of an SLP instance. Each node corresponds to a group of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec<slp_tree> children; /* A group of scalar stmts to be vectorized together. */ vec<gimple *> stmts; /* Load permutation relative to the stores, NULL if there is no permutation. */ vec<unsigned> load_permutation; /* Vectorized stmt/s. */ vec<gimple *> vec_stmts; /* Number of vector stmts that are created to replace the group of scalar stmts. It is calculated during the transformation phase as the number of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector size. */ unsigned int vec_stmts_size; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts. */ typedef struct _slp_instance { /* The root of SLP tree. */ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. */ unsigned int unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec<slp_tree> loads; } *slp_instance; /* Access Functions. 
*/ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* This struct is used to store the information of a data reference, including the data ref itself, the access offset (calculated by summing its offset and init) and the segment length for aliasing checks. This is used to merge alias checks. */ struct dr_with_seg_len { dr_with_seg_len (data_reference_p d, tree len) : dr (d), offset (size_binop (PLUS_EXPR, DR_OFFSET (d), DR_INIT (d))), seg_len (len) {} data_reference_p dr; tree offset; tree seg_len; }; /* This struct contains two dr_with_seg_len objects with aliasing data refs. Two comparisons are generated from them. */ struct dr_with_seg_len_pair_t { dr_with_seg_len_pair_t (const dr_with_seg_len& d1, const dr_with_seg_len& d2) : first (d1), second (d2) {} dr_with_seg_len first; dr_with_seg_len second; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum { bb, loop } kind; /* All SLP instances. */ vec<slp_instance> slp_instances; /* All data references. */ vec<data_reference_p> datarefs; /* All data dependences. */ vec<ddr_p> ddrs; /* All interleaving chains of stores, represented by the first stmt in the chain. */ vec<gimple *> grouped_stores; /* Cost data used by the target cost model. 
*/ void *target_cost_data; }; struct _loop_vec_info; struct _bb_vec_info; template<> template<> inline bool is_a_helper <_loop_vec_info *>::test (vec_info *i) { return i->kind == vec_info::loop; } template<> template<> inline bool is_a_helper <_bb_vec_info *>::test (vec_info *i) { return i->kind == vec_info::bb; } /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info : public vec_info { /* The loop to which this info struct refers to. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Threshold of number of iterations below which vectorzation will not be performed. It is calculated from MIN_PROFITABLE_ITERS and PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* Is the loop vectorizable? */ bool vectorizable; /* Unrolling factor */ int vectorization_factor; /* Unknown DRs according to which loop was peeled. */ struct data_reference *unaligned_dr; /* peeling_for_alignment indicates whether peeling for alignment will take place, and what the peeling factor should be: peeling_for_alignment = X means: If X=0: Peeling for alignment will not be applied. If X>0: Peel first X iterations. If X=-1: Generate a runtime test to calculate the number of iterations to be peeled, using the dataref recorded in the field unaligned_dr. */ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. */ int ptr_mask; /* The loop nest in which the data dependences are computed. */ vec<loop_p> loop_nest; /* Data Dependence Relations defining address ranges that are candidates for a run-time aliasing check. 
*/ vec<ddr_p> may_alias_ddrs; /* Data Dependence Relations defining address ranges together with segment lengths from which the run-time aliasing check is built. */ vec<dr_with_seg_len_pair_t> comp_alias_ddrs; /* Statements in the loop that have data references that are candidates for a runtime (loop versioning) misalignment check. */ vec<gimple *> may_misalign_stmts; /* The unrolling factor needed to SLP the loop. In case of that pure SLP is applied to the loop, i.e., no unrolling is needed, this is 1. */ unsigned slp_unrolling_factor; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ vec<gimple *> reductions; /* All reduction chains in the loop, represented by the first stmt in the chain. */ vec<gimple *> reduction_chains; /* Cost vector for a single scalar iteration. */ vec<stmt_info_for_cost> scalar_cost_vec; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* When we have grouped data accesses with gaps, we may introduce invalid memory accesses. We peel the last iteration of the loop to prevent this. */ bool peeling_for_gaps; /* When the number of iterations is not a multiple of the vector size we need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* Reductions are canonicalized so that the last operand is the reduction operand. If this places a constant into RHS1, this decanonicalizes GIMPLE for other phases, so we must track when this has occurred and fix it up. */ bool operands_swapped; /* True if there are no loop carried data dependencies in the loop. If loop->safelen <= 1, then this is always true, either the loop didn't have any loop carried data dependencies, or the loop is being vectorized guarded with some runtime alias checks, or couldn't be vectorized at all, but then this field shouldn't be used. For loop->safelen >= 2, the user has asserted that there are no backward dependencies, but there still could be loop carried forward dependencies in such loops. 
This flag will be false if normal vectorizer data dependency analysis would fail or require versioning for alias, but because of loop->safelen >= 2 it has been vectorized even without versioning for alias. E.g. in: #pragma omp simd for (int i = 0; i < m; i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma ivdep) we can vectorize this and it will DTRT even for k > 0 && k < m, but without safelen we would not vectorize this, so this field would be false. */ bool no_data_dependencies; /* If if-conversion versioned this loop before conversion, this is the loop version without if-conversion. */ struct loop *scalar_loop; /* Mark loops having masked stores. */ bool has_mask_store; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue peeling retain total unchanged scalar loop iterations for cost model. 
*/ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->datarefs #define LOOP_VINFO_DDRS(L) (L)->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->may_alias_ddrs.length () > 0) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) static inline loop_vec_info loop_vec_info_for_loop (struct loop 
*loop) { return (loop_vec_info) loop->aux; } static inline bool nested_in_vect_loop_p (struct loop *loop, gimple *stmt) { return (loop->inner && (loop->inner == (gimple_bb (stmt))->loop_father)); } typedef struct _bb_vec_info : public vec_info { basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->datarefs #define BB_VINFO_DDRS(B) (B)->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb (basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is in the inner loop, and the use is in the outer loop, and the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* The def is in the inner loop, and the use is in the outer loop (and is not part of reduction). */ vect_used_in_outer, /* defs that feed computations that end up (only) in a reduction. These defs may be used by non-reduction stmts, but eventually, any computations/values that are affected by these defs are used to compute a reduction (i.e. don't get stored to memory, for example). 
We use this to identify computations that we can change the order in which they are computed. */ vect_used_by_reduction, vect_used_in_scope }; /* The type of vectorization that can be applied to the stmt: regular loop-based vectorization; pure SLP - the stmt is a part of SLP instances and does not have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is a part of SLP instance and also must be loop-based vectorized, since it has uses outside SLP sequences. In the loop context the meanings of pure and hybrid SLP are slightly different. By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, cause we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; typedef struct data_reference *dr_p; typedef struct _stmt_vec_info { enum stmt_vec_info_type type; /* Indicates whether this stmts is part of a computation whose result is used outside the loop. */ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. */ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ gimple *vectorized_stmt; /** The following is relevant only for stmts that contain a non-scalar data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have at most one such data-ref. 
**/ /* Information about the data-ref (access function, etc), relative to the inner-most containing loop. */ struct data_reference *data_ref_info; /* Information about the data-ref relative to this loop nest (the loop that is being considered for vectorization). */ tree dr_base_address; tree dr_init; tree dr_offset; tree dr_step; tree dr_aligned_to; /* For loop PHI nodes, the base and evolution part of it. This makes sure this information is still available in vect_update_ivs_after_vectorizer where we may not be able to re-analyze the PHI nodes evolution as peeling for the prologue loop can make it unanalyzable. The evolution part is still correct after peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* Used for various bookkeeping purposes, generally holding a pointer to some other stmt S that is in some way "related" to this stmt. Current use of this field is: If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is true): S is the "pattern stmt" that represents (and replaces) the sequence of stmts that constitutes the pattern. Similarly, the related_stmt of the "pattern stmt" points back to this stmt (which is the last stmt in the original sequence of stmts that constitutes the pattern). */ gimple *related_stmt; /* Used to keep a sequence of def stmts of a pattern stmt if such exists. */ gimple_seq pattern_def_seq; /* List of datarefs that are known to have the same alignment as the dataref of this stmt. */ vec<dr_p> same_align_refs; /* Selected SIMD clone's function info. First vector element is SIMD clone's function decl, followed by a pair of trees (base + step) for linear arguments (pair of NULLs for other arguments). */ vec<tree> simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. 
*/ /* First element in the group. */ gimple *first_element; /* Pointer to the next element in the group. */ gimple *next_element; /* For data-refs, in case that two or more stmts share data-ref, this is the pointer to the previously detected stmt with the same dr. */ gimple *same_dr_stmt; /* The size of the group. */ unsigned int size; /* For stores, number of stores from this group seen. We vectorize the last one. */ unsigned int store_count; /* For loads only, the gap from the previous load. For consecutive loads, GAP is 1. */ unsigned int gap; /* The minimum negative dependence distance this stmt participates in or zero if none. */ unsigned int min_neg_dist; /* Not all stmts in the loop need to be vectorized. e.g, the increment of the loop induction variable and computation of array indexes. relevant indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* Is this statement vectorizable or should it be skipped in (partial) vectorization. */ bool vectorizable; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* For reduction loops, this is the type of reduction. */ enum vect_reduction_type v_reduc_type; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; } *stmt_vec_info; /* Access Functions. 
*/

/* Accessors for the fields of a stmt_vec_info.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt

/* Return the loop_vec_info owning STMT_VINFO, or NULL if it belongs to a
   basic-block vectorization region instead of a loop.  */
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}

/* Return the bb_vec_info owning STMT_VINFO, or NULL if it belongs to a
   loop vectorization region instead of a basic block.  */
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}

#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             (S)->data_ref_info
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type

/* Components of the data reference, relative to the loop nest being
   vectorized.  */
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_step
#define STMT_VINFO_DR_ALIGNED_TO(S)        (S)->dr_aligned_to

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type

/* Interleaving-group accessors.  */
#define STMT_VINFO_GROUP_FIRST_ELEMENT(S)  (S)->first_element
#define STMT_VINFO_GROUP_NEXT_ELEMENT(S)   (S)->next_element
#define STMT_VINFO_GROUP_SIZE(S)           (S)->size
#define STMT_VINFO_GROUP_STORE_COUNT(S)    (S)->store_count
#define STMT_VINFO_GROUP_GAP(S)            (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S)   (S)->same_dr_stmt
/* True if S is in an interleaving group and has a data reference.  */
#define STMT_VINFO_GROUPED_ACCESS(S)       ((S)->first_element != NULL && (S)->data_ref_info)
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)         (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)         (S)->num_slp_uses

/* Short-form aliases for the interleaving-group accessors above.  */
#define GROUP_FIRST_ELEMENT(S)             (S)->first_element
#define GROUP_NEXT_ELEMENT(S)              (S)->next_element
#define GROUP_SIZE(S)                      (S)->size
#define GROUP_STORE_COUNT(S)               (S)->store_count
#define GROUP_GAP(S)                       (S)->gap
#define GROUP_SAME_DR_STMT(S)              (S)->same_dr_stmt

#define STMT_VINFO_RELEVANT_P(S)           ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                 ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                   ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

/* Vectorizer bookkeeping attached to a data_reference via its AUX field;
   see DR_VECT_AUX below.  */
struct dataref_aux {
  /* Recorded misalignment; -1 means unknown (see
     known_alignment_for_access_p).  */
  int misalignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  /* If true we know the base is at least vector element alignment
     aligned.  */
  bool base_element_aligned;
  tree base_decl;
};

#define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

/* The maximum vectorization factor supported by any target (V64QI).  */
#define MAX_VECTORIZATION_FACTOR 64

/* Global table mapping a gimple uid (minus one) to its stmt_vec_info;
   maintained by vinfo_for_stmt / set_vinfo_for_stmt.  */
extern vec<stmt_vec_info> stmt_vec_info_vec;

void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);

/* Return a stmt_vec_info corresponding to STMT, or NULL if STMT has not
   been assigned a uid yet.  */
static inline stmt_vec_info
vinfo_for_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    return NULL;

  return stmt_vec_info_vec[uid - 1];
}

/* Set vectorizer information INFO for STMT.
*/ static inline void set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info) { unsigned int uid = gimple_uid (stmt); if (uid == 0) { gcc_checking_assert (info); uid = stmt_vec_info_vec.length () + 1; gimple_set_uid (stmt, uid); stmt_vec_info_vec.safe_push (info); } else { gcc_checking_assert (info == NULL); stmt_vec_info_vec[uid - 1] = info; } } /* Return the earlier statement between STMT1 and STMT2. */ static inline gimple * get_earlier_stmt (gimple *stmt1, gimple *stmt2) { unsigned int uid1, uid2; if (stmt1 == NULL) return stmt2; if (stmt2 == NULL) return stmt1; uid1 = gimple_uid (stmt1); uid2 = gimple_uid (stmt2); if (uid1 == 0 || uid2 == 0) return NULL; gcc_checking_assert (uid1 <= stmt_vec_info_vec.length () && uid2 <= stmt_vec_info_vec.length ()); if (uid1 < uid2) return stmt1; else return stmt2; } /* Return the later statement between STMT1 and STMT2. */ static inline gimple * get_later_stmt (gimple *stmt1, gimple *stmt2) { unsigned int uid1, uid2; if (stmt1 == NULL) return stmt2; if (stmt2 == NULL) return stmt1; uid1 = gimple_uid (stmt1); uid2 = gimple_uid (stmt2); if (uid1 == 0 || uid2 == 0) return NULL; gcc_assert (uid1 <= stmt_vec_info_vec.length ()); gcc_assert (uid2 <= stmt_vec_info_vec.length ()); if (uid1 > uid2) return stmt1; else return stmt2; } /* Return TRUE if a statement represented by STMT_INFO is a part of a pattern. */ static inline bool is_pattern_stmt_p (stmt_vec_info stmt_info) { gimple *related_stmt; stmt_vec_info related_stmt_info; related_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (related_stmt && (related_stmt_info = vinfo_for_stmt (related_stmt)) && STMT_VINFO_IN_PATTERN_P (related_stmt_info)) return true; return false; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p (basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert (EDGE_COUNT (bb->preds) == 1); return false; } /* Return pow2 (X). 
*/ static inline int vect_pow2 (int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost (type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost (type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost (struct loop *loop_info) { return targetm.vectorize.init_cost (loop_info); } /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { return targetm.vectorize.add_stmt_cost (data, count, kind, stmt_info, misalign, where); } /* Alias targetm.vectorize.finish_cost. */ static inline void finish_cost (void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data (void *data) { targetm.vectorize.destroy_cost_data (data); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. 
*/ /*-----------------------------------------------------------------*/ inline void set_dr_misalignment (struct data_reference *dr, int val) { dataref_aux *data_aux = DR_VECT_AUX (dr); if (!data_aux) { data_aux = XCNEW (dataref_aux); dr->aux = data_aux; } data_aux->misalignment = val; } inline int dr_misalignment (struct data_reference *dr) { return DR_VECT_AUX (dr)->misalignment; } /* Reflects actual alignment of first access in the vectorized loop, taking into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Return TRUE if the data access is aligned, and FALSE otherwise. */ static inline bool aligned_access_p (struct data_reference *data_ref_info) { return (DR_MISALIGNMENT (data_ref_info) == 0); } /* Return TRUE if the alignment of the data access is known, and FALSE otherwise. */ static inline bool known_alignment_for_access_p (struct data_reference *data_ref_info) { return (DR_MISALIGNMENT (data_ref_info) != -1); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model (loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* Source location */ extern source_location vect_location; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* Simple loop peeling and versioning utilities for vectorizer's purposes - in tree-vect-loop-manip.c. 
*/
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
						     struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
					    unsigned int, bool);
extern void vect_do_peeling_for_alignment (loop_vec_info, tree,
					   unsigned int, bool);
extern source_location find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern unsigned int current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
				enum vect_def_type *);
/* Overload with an extra out parameter for the operand.  */
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
				enum vect_def_type *, tree *);
extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
					    tree, enum tree_code *,
					    enum tree_code *, int *,
					    vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
					     enum tree_code *, int *,
					     vec<tree> *);
extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *);
extern void free_stmt_vec_info (gimple *stmt);
extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
				    stmt_vector_for_cost *,
				    stmt_vector_for_cost *);
extern void vect_model_store_cost (stmt_vec_info, int, bool,
				   enum vect_def_type, slp_tree,
				   stmt_vector_for_cost *,
				   stmt_vector_for_cost *);
extern void vect_model_load_cost (stmt_vec_info, int, bool, slp_tree,
				  stmt_vector_for_cost *,
				  stmt_vector_for_cost *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
				  enum vect_cost_for_stmt, stmt_vec_info,
				  int, enum vect_cost_model_location);
extern void vect_finish_stmt_generation (gimple *, gimple *,
					 gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
extern tree vect_init_vector (gimple *, tree, tree,
			      gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
				 bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple *);
extern bool vect_analyze_stmt (gimple *, bool *, slp_tree);
extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
				    gimple **, tree, int, slp_tree);
extern bool vectorizable_comparison (gimple *, gimple_stmt_iterator *,
				     gimple **, tree, int, slp_tree);
extern void vect_get_load_cost (struct data_reference *, int, bool,
				unsigned int *, unsigned int *,
				stmt_vector_for_cost *,
				stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (struct data_reference *, int,
				 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
			       vec<tree> *, slp_tree, int);
extern tree vect_gen_perm_mask_any (tree, const unsigned char *);
extern tree vect_gen_perm_mask_checked (tree, const unsigned char *);
extern void optimize_mask_stores (struct loop *);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
				   (struct data_reference *, bool);
extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
					   HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info);
extern bool vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern bool vect_analyze_data_ref_accesses (vec_info *);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern tree vect_check_gather_scatter (gimple *, loop_vec_info, tree *,
				       tree *, int *);
extern bool vect_analyze_data_refs (vec_info *, int *);
extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
				      tree *, gimple_stmt_iterator *,
				      gimple **, bool, bool *,
				      tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
			     gimple *, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern void vect_permute_store_chain (vec<tree>, unsigned int, gimple *,
				      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *,
				    tree *, enum dr_alignment_support,
				    tree, struct loop **);
extern void vect_transform_grouped_load (gimple *, vec<tree>, int,
					 gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (gimple *, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
				   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
						  tree, struct loop *,
						  tree = NULL_TREE);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern void destroy_loop_vec_info (loop_vec_info, bool);
extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *,
					    bool, bool *, bool);
/* Drive for loop analysis stage.  */
extern loop_vec_info vect_analyze_loop (struct loop *);
/* Drive for loop transformation stage.  */
extern void vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *);
extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
					 gimple **);
extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
				    gimple **, slp_tree);
extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *,
				    gimple **);
extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
extern int vect_min_worthwhile_factor (enum tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *);

/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
					  gimple_stmt_iterator *, int,
					  slp_instance, bool);
extern bool vect_slp_analyze_operations (vec<slp_instance> slp_instances,
					 void *);
extern bool vect_schedule_slp (vec_info *);
extern bool vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *, int);
extern bool vect_slp_bb (basic_block);
extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
#define NUM_PATTERNS 14
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_destroy_datarefs (vec_info *);
bool vect_stmt_in_region_p (vec_info *, gimple *);

#endif  /* GCC_TREE_VECTORIZER_H  */
GB_binop__bclr_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bclr_int8
// A.*B function (eWiseMult):       GB_AemultB__bclr_int8
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bclr_int8
// C+=b function (dense accum):     GB_Cdense_accumb__bclr_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bclr_int8
// C=scalar+B                       GB_bind1st__bclr_int8
// C=scalar+B'                      GB_bind1st_tran__bclr_int8
// C=A+scalar                       GB_bind2nd__bclr_int8
// C=A'+scalar                      GB_bind2nd_tran__bclr_int8

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITCLR (x, y, int8_t, 8) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bclr_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bclr_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bclr_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bclr_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bclr_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bclr_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar 
bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bclr_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ; \ } GrB_Info GB_bind1st_tran__bclr_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ; \ } GrB_Info GB_bind2nd_tran__bclr_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB065-pireduction-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Classic PI calculation using reduction */ #define num_steps 2000000000 #include <stdio.h> #include <omp.h> int main(int argc,char **argv) { double pi = 0.0; long i; double x; double interval_width; interval_width = 1.0 / ((double )2000000000); #pragma omp parallel for private (x,i) reduction (+:pi) for (i = 0; i <= ((long )2000000000) - 1; i += 1) { x = (i + 0.5) * interval_width; pi += 1.0 / (x * x + 1.0); } pi = pi * 4.0 * interval_width; printf("PI=%f\n",pi); return 0; }
SwathFileConsumer.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2021. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #pragma once #include <boost/cast.hpp> // Datastructures #include <OpenMS/OPENSWATHALGO/DATAACCESS/DataStructures.h> #include <OpenMS/OPENSWATHALGO/DATAACCESS/SwathMap.h> // Consumers #include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h> #include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h> #include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h> // Helpers #include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h> #include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h> #include <OpenMS/INTERFACES/IMSDataConsumer.h> #include <OpenMS/FORMAT/HANDLERS/CachedMzMLHandler.h> #include <OpenMS/KERNEL/StandardTypes.h> #ifdef _OPENMP #include <omp.h> #endif namespace OpenMS { /** * @brief Abstract base class which can consume spectra coming from SWATH experiment stored in a single file. * * The class consumes spectra which are coming from a complete SWATH * experiment. It will group MS2 spectra by their precursor m/z, assuming * that they correspond to the same SWATH window. For example, the spectra * could be arranged in the following fashion: * * - MS1 Spectrum (no precursor) * - MS2 Spectrum (precursor = [400,425]) * - MS2 Spectrum (precursor = [425,450]) * - [...] * - MS2 Spectrum (precursor = [1175,1200]) * - MS1 Spectrum (no precursor) * - MS2 Spectrum (precursor = [400,425]) * - MS2 Spectrum (precursor = [425,450]) * - [...] * * Base classes are expected to implement functions consuming a spectrum coming * from a specific SWATH or an MS1 spectrum and a final function * ensureMapsAreFilled_ after which the swath_maps_ vector needs to contain * valid pointers to MSExperiment. 
 *
 * In addition it is possible to provide the swath boundaries and the read in
 * spectra will be matched by their precursor m/z to the "center" attribute
 * of the provided Swath maps.
 *
 * Usage:
 *
 * @code
 * FullSwathFileConsumer * dataConsumer;
 * // assign dataConsumer to an implementation of FullSwathFileConsumer
 * MzMLFile().transform(file, dataConsumer);
 * dataConsumer->retrieveSwathMaps(maps);
 * @endcode
 *
 */
  class OPENMS_DLLAPI FullSwathFileConsumer :
    public Interfaces::IMSDataConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    /// Default constructor: no known window boundaries, windows are inferred
    /// from the precursor m/z of incoming MS2 spectra.
    FullSwathFileConsumer() :
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      // swath_map_boundaries_ is empty here, so this stays false
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    /**
     * @brief Constructor
     *
     * @param swath_boundaries A vector of SwathMaps of which only the center,
     * lower and upper attributes will be used to infer the expected Swath maps.
     *
     */
    FullSwathFileConsumer(std::vector<OpenSwath::SwathMap> swath_boundaries) :
      swath_map_boundaries_(swath_boundaries),
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    ~FullSwathFileConsumer() override {}

    void setExpectedSize(Size, Size) override {}
    void setExperimentalSettings(const ExperimentalSettings& exp) override {settings_ = exp; }

    /**
     * @brief Populate the vector of swath maps after consuming all spectra.
     *
     * Will populate the input vector with SwathMap objects which correspond to
     * the MS1 map (if present) and the MS2 maps (SWATH maps). This should be
     * called after all spectra are consumed.
     *
     * @note It is not possible to consume any more spectra after calling this
     * function (it contains finalization code and may close file streams).
     *
     */
    void retrieveSwathMaps(std::vector<OpenSwath::SwathMap>& maps)
    {
      consuming_possible_ = false; // make consumption of further spectra / chromatograms impossible
      ensureMapsAreFilled_();
      if (ms1_map_)
      {
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(ms1_map_);
        // -1 marks the MS1 map: it has no precursor isolation window
        map.lower = -1;
        map.upper = -1;
        map.center = -1;
        map.ms1 = true;
        maps.push_back(map);
      }

      // Print warning if the lower/upper window could not be determined and we
      // required manual determination of the boundaries.
      if (!use_external_boundaries_ && correct_window_counter_ != swath_maps_.size())
      {
        std::cout << "WARNING: Could not correctly read the upper/lower limits of the SWATH windows from your input file. Read " <<
          correct_window_counter_ << " correct (non-zero) window limits (expected " <<
          swath_maps_.size() << " windows)." << std::endl;
      }

      size_t nonempty_maps = 0;
      for (Size i = 0; i < swath_maps_.size(); i++)
      {
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(swath_maps_[i]);
        map.lower = swath_map_boundaries_[i].lower;
        map.upper = swath_map_boundaries_[i].upper;
        map.center = swath_map_boundaries_[i].center;
        map.ms1 = false;
        maps.push_back(map);
        if (map.sptr->getNrSpectra() > 0) {nonempty_maps++;}
      }

      if (nonempty_maps != swath_map_boundaries_.size())
      {
        std::cout << "WARNING: The number nonempty maps found in the input file (" <<
          nonempty_maps << ") is not equal to the number of provided swath window boundaries (" <<
          swath_map_boundaries_.size() << "). Please check your input." << std::endl;
      }
    }

    /// Consume a chromatogram -> should not happen when dealing with SWATH maps
    void consumeChromatogram(MapType::ChromatogramType&) override
    {
      std::cerr << "Read chromatogram while reading SWATH files, did not expect that!" << std::endl;
    }

    /**
     * @brief
     * Consume a spectrum which may belong either to an MS1 scan or
     * one of n MS2 (SWATH) scans
     *
     */
    void consumeSpectrum(MapType::SpectrumType& s) override
    {
      if (!consuming_possible_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "FullSwathFileConsumer cannot consume any more spectra after retrieveSwathMaps has been called already");
      }

      if (s.getMSLevel() == 1)
      {
        consumeMS1Spectrum_(s);
      }
      else
      {
        if (s.getPrecursors().empty())
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Swath scan does not provide a precursor.");
        }
        // only the first precursor is considered for window assignment
        const std::vector<Precursor> prec = s.getPrecursors();
        double center = prec[0].getMZ();
        double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset();
        double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset();
        bool found = false;

        // Check if enough information is present to infer the swath
        if (center <= 0.0)
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Swath scan does not provide any precursor isolation information.");
        }

        // try to match the current scan to one of the already known windows
        for (Size i = 0; i < swath_map_boundaries_.size(); i++)
        {
          // We group by the precursor mz (center of the window) since this
          // should be present in all SWATH scans.
          // 1e-6 m/z tolerance: centers are expected to repeat exactly per cycle
          if (std::fabs(center - swath_map_boundaries_[i].center) < 1e-6)
          {
            found = true;
            consumeSwathSpectrum_(s, i);
            break;
          }
        }
        if (!found)
        {
          if (use_external_boundaries_)
          {
            throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
              String("Encountered SWATH scan with boundary ") + center + " m/z which was not present in the provided windows.");
          }
          else
          {
            consumeSwathSpectrum_(s, swath_map_boundaries_.size());

            // we found a new SWATH window
            if (lower > 0.0 && upper > 0.0)
            {correct_window_counter_++;}

            OpenSwath::SwathMap boundary;
            boundary.lower = lower;
            boundary.upper = upper;
            boundary.center = center;
            swath_map_boundaries_.push_back(boundary);

            OPENMS_LOG_DEBUG << "Adding Swath centered at " << center
              << " m/z with an isolation window of " << lower << " to " << upper
              << " m/z." << std::endl;
          }
        }
      }
    }

protected:

    /**
     * @brief Consume an MS2 spectrum belonging to SWATH "swath_nr"
     *
     * This function should handle a spectrum belonging to a specific SWATH
     * (indicated by swath_nr).
     *
     */
    virtual void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) = 0;

    /**
     * @brief Consume an MS1 spectrum
     *
     * This function should handle an MS1 spectrum.
     *
     */
    virtual void consumeMS1Spectrum_(MapType::SpectrumType& s) = 0;

    /**
     * @brief Callback function after the reading is complete
     *
     * Has to ensure that swath_maps_ and ms1_map_ are correctly populated.
     */
    virtual void ensureMapsAreFilled_() = 0;

    /// A list of Swath map identifiers (lower/upper boundary and center)
    std::vector<OpenSwath::SwathMap> swath_map_boundaries_;

    /// A list of SWATH maps and the MS1 map
    std::vector<boost::shared_ptr<PeakMap > > swath_maps_;
    boost::shared_ptr<PeakMap > ms1_map_;

    /// The Experimental settings
    // (MSExperiment has no constructor using ExperimentalSettings)
    PeakMap settings_;

    /// Whether further spectra can still be consumed
    bool consuming_possible_;

    /// Whether to use external input for SWATH boundaries
    bool use_external_boundaries_;

    /// How many windows were correctly annotated (non-zero window limits)
    size_t correct_window_counter_;
  };

  /**
   * @brief In-memory implementation of FullSwathFileConsumer
   *
   * Keeps all the spectra in memory by just appending them to an MSExperiment.
   *
   */
  class OPENMS_DLLAPI RegularSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    RegularSwathFileConsumer() {}

    RegularSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries) :
      FullSwathFileConsumer(known_window_boundaries) {}

protected:
    /// Append a fresh in-memory experiment (copies experimental settings)
    void addNewSwathMap_()
    {
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      swath_maps_.push_back(exp);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_maps_[swath_nr]->addSpectrum(s);
    }

    void addMS1Map_()
    {
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      ms1_map_ = exp;
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (!ms1_map_)
      {
        addMS1Map_();
      }
      ms1_map_->addSpectrum(s);
    }

    /// Nothing to do: maps were filled while consuming
    void ensureMapsAreFilled_() override {}
  };

  /**
   * @brief On-disk cached implementation of FullSwathFileConsumer
   *
   * Writes all spectra immediately to disk in a user-specified caching
   * location using the MSDataCachedConsumer.
 Internally, it handles
   * n+1 (n SWATH + 1 MS1 map) objects of MSDataCachedConsumer which can consume the
   * spectra and write them to disk immediately.
   *
   */
  class OPENMS_DLLAPI CachedSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    CachedSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    CachedSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
                            String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      FullSwathFileConsumer(known_window_boundaries),
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    ~CachedSwathFileConsumer() override
    {
      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }
    }

protected:
    /// Create one more on-disk consumer (cached file) plus a matching
    /// in-memory experiment that keeps only the metadata.
    void addNewSwathMap_()
    {
      String meta_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
      String cached_file = meta_file + ".cached";
      MSDataCachedConsumer* consumer = new MSDataCachedConsumer(cached_file, true);
      consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
      swath_consumers_.push_back(consumer);

      // maps for meta data
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      swath_maps_.push_back(exp);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_consumers_[swath_nr]->consumeSpectrum(s); // write data to cached file; clear data from spectrum s
      swath_maps_[swath_nr]->addSpectrum(s); // append for the metadata (actual data was deleted)
    }

    void addMS1Map_()
    {
      String meta_file = cachedir_ + basename_ + "_ms1.mzML";
      String cached_file = meta_file + ".cached";
      ms1_consumer_ = new MSDataCachedConsumer(cached_file, true);
      ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      ms1_map_ = exp;
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (ms1_consumer_ == nullptr)
      {
        addMS1Map_();
      }
      ms1_consumer_->consumeSpectrum(s);
      ms1_map_->addSpectrum(s); // append for the metadata (actual data is deleted)
    }

    void ensureMapsAreFilled_() override
    {
      size_t swath_consumers_size = swath_consumers_.size();
      bool have_ms1 = (ms1_consumer_ != nullptr);

      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      // The file streams to the cached data on disc can and should be closed
      // here safely. Since ensureMapsAreFilled_ is called after consuming all
      // the spectra, there will be no more spectra to append but the client
      // might already want to read after this call, so all data needs to be
      // present on disc and the file streams closed.
      //
      // TODO merge with destructor code into own function!
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }

      if (have_ms1)
      {
        boost::shared_ptr<PeakMap > exp(new PeakMap);
        String meta_file = cachedir_ + basename_ + "_ms1.mzML";
        // write metadata to disk and store the correct data processing tag
        Internal::CachedMzMLHandler().writeMetadata(*ms1_map_, meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        ms1_map_ = exp;
      }

      // NOTE(review): loop iterations are independent (one file per i),
      // which is presumably why the parallel for is safe here
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_consumers_size); i++)
      {
        boost::shared_ptr<PeakMap > exp(new PeakMap);
        String meta_file = cachedir_ + basename_ + "_" + String(i) + ".mzML";
        // write metadata to disk and store the correct data processing tag
        Internal::CachedMzMLHandler().writeMetadata(*swath_maps_[i], meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        swath_maps_[i] = exp;
      }
    }

    MSDataCachedConsumer* ms1_consumer_;
    std::vector<MSDataCachedConsumer*> swath_consumers_;

    String cachedir_;
    String basename_;
    int nr_ms1_spectra_;
    std::vector<int> nr_ms2_spectra_;
  };

  /**
   * @brief On-disk mzML implementation of FullSwathFileConsumer
   *
   * Writes all spectra immediately to disk to an mzML file location using the
   * PlainMSDataWritingConsumer. Internally, it handles n+1 (n SWATH + 1 MS1
   * map) objects of MSDataCachedConsumer which can consume the spectra and
   * write them to disk immediately.
   *
   * Warning: no swathmaps (MS1 nor MS2) will be available when calling retrieveSwathMaps()
   * for downstream use.
 *
   */
  class OPENMS_DLLAPI MzMLSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    MzMLSwathFileConsumer(const String& cachedir,
                          const String& basename,
                          Size nr_ms1_spectra,
                          const std::vector<int>& nr_ms2_spectra) :
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    MzMLSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
                          const String& cachedir,
                          const String& basename,
                          Size nr_ms1_spectra,
                          const std::vector<int>& nr_ms2_spectra) :
      FullSwathFileConsumer(known_window_boundaries),
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    ~MzMLSwathFileConsumer() override
    {
      deleteSetNull_();
    }

protected:

    void deleteSetNull_()
    {
      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }
    }

    /// Open a new compressed mzML writer for the next SWATH window
    void addNewSwathMap_()
    {
      String mzml_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
      PlainMSDataWritingConsumer* consumer = new PlainMSDataWritingConsumer(mzml_file);
      consumer->getOptions().setCompression(true);
      consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
      swath_consumers_.push_back(consumer);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      // only use swath_consumers_ to count how many we have already added
      while (swath_consumers_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_consumers_[swath_nr]->consumeSpectrum(s);
      // free the peak data once written; metadata is kept (clear(false))
      s.clear(false);
    }

    void addMS1Map_()
    {
      String mzml_file = cachedir_ + basename_ + "_ms1.mzML";
      ms1_consumer_ = new PlainMSDataWritingConsumer(mzml_file);
      ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
      ms1_consumer_->getOptions().setCompression(true);
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (ms1_consumer_ == nullptr)
      {
        addMS1Map_();
      }
      ms1_consumer_->consumeSpectrum(s);
    }

    /// Only closes the writers; no maps are populated (see class docs)
    void ensureMapsAreFilled_() override
    {
      deleteSetNull_();
    }

    PlainMSDataWritingConsumer* ms1_consumer_;
    std::vector<PlainMSDataWritingConsumer*> swath_consumers_;

    String cachedir_;
    String basename_;
    int nr_ms1_spectra_;
    std::vector<int> nr_ms2_spectra_;
  };
}
hhalign_wrapper.c
/* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */

/*********************************************************************
 * Clustal Omega - Multiple sequence alignment
 *
 * Copyright (C) 2010 University College Dublin
 *
 * Clustal-Omega is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This file is part of Clustal-Omega.
 *
 ********************************************************************/

/*
 * RCS $Id: hhalign_wrapper.c 306 2016-06-13 13:49:04Z fabian $
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <stdbool.h>

#include "seq.h"
#include "tree.h"
#include "progress.h"
#include "hhalign/general.h"
#include "hhalign/hhfunc.h"
#include "hhalign/hhalign.h"

/* up to this level (from leaf) will background HMM info be applied */
#define APPLY_BG_HMM_UP_TO_TREE_DEPTH 10

#define TIMING 0
#define TRACE 0

/**
 * @brief FIXME
 *
 * Fills a hhalign parameter struct with defaults. All pair-HMM
 * parameters are set to -UNITY, a negative sentinel presumably meaning
 * "not set by the user" -- TODO confirm against hhalign's handling.
 *
 * @note prHalignPara has to point to an already allocated instance
 *
 */
void SetDefaultHhalignPara(hhalign_para *prHhalignPara)
{
    prHhalignPara->iMacRamMB = 8000; /* default|give just under 8GB to MAC algorithm, FS, 2016-04-18, went from 2GB to 8GB */
    prHhalignPara->bIsDna = false; /* protein mode unless we say otherwise */
    prHhalignPara->bIsRna = false;
    prHhalignPara->pca = -UNITY;
    prHhalignPara->pcb = -UNITY;
    prHhalignPara->pcc = -UNITY;
    prHhalignPara->pcw = -UNITY;
    prHhalignPara->gapb = -UNITY;
    prHhalignPara->gapd = -UNITY;
    prHhalignPara->gape = -UNITY;
    prHhalignPara->gapf = -UNITY;
    prHhalignPara->gapg = -UNITY;
    prHhalignPara->gaph = -UNITY;
    prHhalignPara->gapi = -UNITY;
    prHhalignPara->pcaV = -UNITY;
    prHhalignPara->pcbV = -UNITY;
    prHhalignPara->pccV = -UNITY;
    prHhalignPara->pcwV = -UNITY;
    prHhalignPara->gapbV = -UNITY;
    prHhalignPara->gapdV = -UNITY;
    prHhalignPara->gapeV = -UNITY;
    prHhalignPara->gapfV = -UNITY;
    prHhalignPara->gapgV = -UNITY;
    prHhalignPara->gaphV = -UNITY;
    prHhalignPara->gapiV = -UNITY;
} /*** end: SetDefaultHhalignPara() ***/

/**
 * @brief get rid of unknown residues
 *
 * @note HHalignWrapper can be entered in 2 different ways: (i) all
 * sequences are un-aligned (ii) there are 2 (aligned) profiles. in
 * the un-aligned case (i) the sequences come straight from Squid,
 * that is, they have been sanitised, all non-alphabetic residues
 * have been rendered as X's. In profile mode (ii) one profile may
 * have been produced internally. In that case residues may have
 * been translated back into their 'native' form, that is, they may
 * contain un-sanitised residues. These will cause trouble during
 * alignment
 * FS, r213->214
 */
void SanitiseUnknown(mseq_t *mseq)
{
    int iS; /* iterator for sequence */
    /*int iR;*/ /* iterator for residue */
    /*int iLen;*/ /* length of sequence */
    char *pcRes = NULL;

    for (iS = 0; iS < mseq->nseqs; iS++){
        for (pcRes = mseq->seq[iS]; '\0' != *pcRes; pcRes++){
            if (isgap(*pcRes)){
                /* don't like MSF gap characters ('~'),
                   sanitise them (and '.' and ' '); FS, r258 -> r259 */
                *pcRes = '-';
                continue;
            }
            /* any residue outside the alphabet for this sequence type
             * is replaced by the type's wildcard/'any' code */
            if (mseq->seqtype==SEQTYPE_PROTEIN) {
                if (NULL == strchr(AMINO_ALPHABET, toupper(*pcRes))) {
                    *pcRes = AMINOACID_ANY;
                }
            }
            else if (mseq->seqtype==SEQTYPE_DNA) {
                if (NULL == strchr(DNA_ALPHABET, toupper(*pcRes))) {
                    *pcRes = NUCLEOTIDE_ANY;
                }
            }
            else if (mseq->seqtype==SEQTYPE_RNA) {
                if (NULL == strchr(RNA_ALPHABET, toupper(*pcRes))) {
                    *pcRes = NUCLEOTIDE_ANY;
                }
            }
        } /* !EO String */
    } /* 0 <= iS < mseq->nseqs */

    return;
} /*** end: SanitiseUnknown() ***/

/**
 * @brief translate unknown residues back to ambiguity codes;
 * hhalign translates ambiguity codes (B,Z) into unknown residue (X).
 * we still have the original (un-aligned) residue information,
 * by iterating along the original and aligned sequences we can
 * reconstruct where codes have been changed and restore them
 * to their original value
 *
 * @param[in,out] mseq
 * sequence/profile data, mseq->seq [in,out] is changed to conform
 * with mseq->orig_seq [in]
 *
 */
void TranslateUnknown2Ambiguity(mseq_t *mseq)
{
    int iS; /* iterator for sequence */
    int iR, iRo; /* iterator for residue (original) */
    int iChange, iCase, iAmbi; /* counts how many replacements */
    static int siOffset = 'a' - 'A'; /* distance between lower and upper case */

    for (iS = 0; iS < mseq->nseqs; iS++){

        iR = iRo = 0;
        iChange = iCase = iAmbi = 0;
        /* walk aligned (iR) and un-aligned (iRo) sequence in lock-step,
         * skipping gaps on either side */
        while(('\0' != mseq->seq[iS][iR]) && ('\0' != mseq->orig_seq[iS][iRo])) {

            /* skip gaps in aligned sequences */
            while(isgap(mseq->seq[iS][iR])) {
                iR++;
            } /* was gap in unaligned seq
               * this should probably not happen */
            while(isgap(mseq->orig_seq[iS][iRo])) {
                iRo++;
            } /* was gap in aligned seq */

            /* check if we reached the end of the sequence after
             * skipping the gaps */
            if ( ('\0' == mseq->seq[iS][iR]) ||
                 ('\0' == mseq->orig_seq[iS][iRo]) ){
                break;
            }

            if (mseq->seq[iS][iR] != mseq->orig_seq[iS][iRo]){
                /* FIXME: count replacements, discard case changes */
                iChange++;
                if ( (mseq->seq[iS][iR] == mseq->orig_seq[iS][iRo]+siOffset) ||
                     (mseq->seq[iS][iR] == mseq->orig_seq[iS][iRo]-siOffset) ){
                    iCase++;
                }
                else {
                    iAmbi++;
                }
#if TRACE
                Log(&rLog, LOG_FORCED_DEBUG, "seq=%d, pos=(%d:%d), (%c:%c)",
                    iS, iR, iRo, mseq->seq[iS][iR], mseq->orig_seq[iS][iRo]);
#endif
                /* restore the original residue code */
                mseq->seq[iS][iR] = mseq->orig_seq[iS][iRo];
            }
            iR++;
            iRo++;

        } /* !EO seq */

        Log(&rLog, LOG_DEBUG,
            "in seq %d re-translated %d residue codes (%d true, %d case)",
            iS, iChange, iAmbi, iCase);

        /* assert that both sequences (un/aligned) have terminated */
        /* skip gaps in aligned sequences */
        while(isgap(mseq->seq[iS][iR])) {
            iR++;
        } /* was gap in unaligned seq
           * this should probably not happen */
        while(isgap(mseq->orig_seq[iS][iRo])) {
            iRo++;
        } /* was gap in aligned seq */

        if ( ('\0' != mseq->seq[iS][iR]) ||
             ('\0' != mseq->orig_seq[iS][iRo]) ){
            Log(&rLog, LOG_FATAL, "inconsistency in un/aligned sequence %d\n>%s\n>%s\n",
                iS, mseq->seq[iS], mseq->orig_seq[iS]);
        }

    } /* 0 <= iS < mseq->nseqs */

} /*** end: TranslateUnknown2Ambiguity() ***/

/**
 * @brief re-attach leading and trailing gaps to alignment
 *
 * @param[in,out] prMSeq
 * alignment structure (at this stage there should be no un-aligned sequences)
 * @param[in] iProfProfSeparator
 * gives sizes of input profiles, -1 if no input-profiles but un-aligned sequences
 *
 * @note leading and tailing profile columns
 * that only contain gaps have no effect on the alignment
 * and are removed during the alignment. if they are
 * encountered a warning message is printed to screen.
 * some users like to preserve these gap columns
 * FS, r213->214
 */
void ReAttachLeadingGaps(mseq_t *prMSeq, int iProfProfSeparator)
{
    int i, j;
    int iSize1 = 0; /* #seqs in 1st profile */
    int iSize2 = 0; /* #seqs in 2nd profile */
    int iPPS = iProfProfSeparator;
    int iLeadO1 = 0; /* leading gaps in 1st seq of 1st profile */
    int iLeadO2 = 0; /* leading gaps in 1st seq of 2nd profile */
    int iLeadA1 = 0; /* leading gaps in 1st seq of final alignment */
    int iLeadA2 = 0; /* leading gaps in PPS seq of final alignment */
    int iTrailO1 = 0; /* trailing gaps in 1st seq of 1st profile */
    int iTrailO2 = 0; /* trailing gaps in 1st seq of 2nd profile */
    int iTrailA1 = 0; /* trailing gaps in 1st seq of final alignment */
    int iTrailA2 = 0; /* trailing gaps in PPS seq of final alignment */
    int iLen = 0; /* length of final alignment */
    int iLen1 = 0; /* length of 1st profile */
    int iLen2 = 0; /* length of 2nd profile */
    int iCutHead = 0; /* make up truncation at head */
    int iCutTail = 0; /* make up truncation at tail */
    char *pcIter = NULL;

    /* only applies in profile/profile mode */
    if (-1 == iProfProfSeparator){
        return;
    }
    else {
        assert(prMSeq->aligned);
        assert(prMSeq->nseqs > iProfProfSeparator);
    }
    iSize1 = iProfProfSeparator;
    iSize2 = prMSeq->nseqs - iProfProfSeparator;

    iLen = strlen(prMSeq->seq[0]);
    iLen1 = strlen(prMSeq->orig_seq[0]);
    iLen2 = strlen(prMSeq->orig_seq[iPPS]);

    /* count leading/trailing gaps in 1st sequence of 1st/2nd profile and final alignmant */
    for (iLeadO1 = 0, pcIter = prMSeq->orig_seq[0]; isgap(*pcIter); pcIter++, iLeadO1++);
    for (iLeadO2 = 0, pcIter = prMSeq->orig_seq[iPPS]; isgap(*pcIter); pcIter++, iLeadO2++);
    for (iLeadA1 = 0, pcIter = prMSeq->seq[0]; isgap(*pcIter); pcIter++, iLeadA1++);
    for (iLeadA2 = 0, pcIter = prMSeq->seq[iPPS]; isgap(*pcIter); pcIter++, iLeadA2++);
    for (iTrailO1 = 0, pcIter = &prMSeq->orig_seq[0][iLen1-1]; isgap(*pcIter); pcIter--, iTrailO1++);
    for (iTrailO2 = 0, pcIter = &prMSeq->orig_seq[iPPS][iLen2-1]; isgap(*pcIter); pcIter--, iTrailO2++);
    for (iTrailA1 = 0, pcIter = &prMSeq->seq[0][iLen-1]; isgap(*pcIter); pcIter--, iTrailA1++);
    for (iTrailA2 = 0, pcIter = &prMSeq->seq[iPPS][iLen-1]; isgap(*pcIter); pcIter--, iTrailA2++);

    /* turn leading/trailing gaps into truncation */
    iLeadO1 = iLeadO1 > iLeadA1 ? iLeadO1-iLeadA1 : 0;
    iLeadO2 = iLeadO2 > iLeadA2 ? iLeadO2-iLeadA2 : 0;
    iTrailO1 = iTrailO1 > iTrailA1 ? iTrailO1-iTrailA1 : 0;
    iTrailO2 = iTrailO2 > iTrailA2 ? iTrailO2-iTrailA2 : 0;
    iCutHead = iLeadO1 > iLeadO2 ? iLeadO1 : iLeadO2;
    iCutTail = iTrailO1 > iTrailO2 ? iTrailO1 : iTrailO2;

    /* re-allocate and shift memory */
    if ( (iCutHead > 0) || (iCutTail > 0) ){ /* skip if no re-attachment, FS, r244 -> r245 */
        for (i = 0; i < prMSeq->nseqs; i++){
            CKREALLOC(prMSeq->seq[i], iLen+iCutHead+iCutTail+2);
            if (iCutHead > 0){ /* skip if no re-attachment, FS, r244 -> r245 */
                memmove(prMSeq->seq[i]+iCutHead, prMSeq->seq[i], iLen);
            }
            /* pad the re-attached head and tail columns with gaps */
            for (j = 0; j < iCutHead; j++){
                prMSeq->seq[i][j] = '-';
            }
            for (j = iLen+iCutHead; j < iLen+iCutHead+iCutTail; j++){
                prMSeq->seq[i][j] = '-';
            }
            prMSeq->seq[i][j] = '\0';
        }
    } /* (iCutHead > 0) || (iCutTail > 0) */

} /*** end: ReAttachLeadingGaps() ***/

/**
 * @brief reallocate enough memory for alignment and
 * attach sequence pointers to profiles
 *
 * @param[in,out] mseq
 * sequence/profile data, increase memory for sequences in profiles
 * @param[out] ppcProfile1
 * pointers to sequencese in 1st profile
 * @param[out] ppcProfile2
 * pointers to sequencese in 2nd profile
 * @param[out] pdWeightsL
 * weights (normalised to 1.0) for sequences in left profile
 * @param[out] pdWeightsR
 * weights (normalised to 1.0) for sequences in right profile
 * @param[in] pdSeqWeights
 * weights for _all_ sequences in alignment
 * @param[in] iLeafCountL
 * number of sequences in 1st profile
 * @param[in] piLeafListL
 * array of integer IDs of sequences in 1st profile
 * @param[in] iLeafCountR
 * number of sequences in 2nd profile
 * @param[in] piLeafListR
 * array of integer IDs of sequences in 2nd profile
 *
 */
void PrepareAlignment(mseq_t *mseq, char **ppcProfile1, char **ppcProfile2,
                      double *pdWeightsL, double *pdWeightsR, double *pdSeqWeights,
                      int iLeafCountL, int *piLeafListL,
                      int iLeafCountR, int *piLeafListR)
{
    int iLenL = 0; /* length of 1st profile */
    int iLenR = 0; /* length of 2nd profile */
    int iMaxLen = 0; /* maximum possible length of alignment */
    int i; /* aux */
    double dWeight = 0.00;
    double dWeightInv = 0.00;

    assert(NULL!=mseq);
    assert(NULL!=ppcProfile1);
    assert(NULL!=ppcProfile2);
    assert(NULL!=piLeafListL);
    assert(NULL!=piLeafListR);

    /* get length of profiles,
     * in a profile all sequences should have same length so only look at 1st */
    iLenL = strlen(mseq->seq[piLeafListL[0]]);
    iLenR = strlen(mseq->seq[piLeafListR[0]]);
    iMaxLen = iLenL + iLenR + 1;

    /* reallocate enough memory for sequences in alignment (for adding
     * gaps) */
    for (i = 0; i < iLeafCountL; i++){
        mseq->seq[piLeafListL[i]] =
            CKREALLOC(mseq->seq[piLeafListL[i]], iMaxLen);
    }
    for (i = 0; i < iLeafCountR; i++){
        mseq->seq[piLeafListR[i]] =
            CKREALLOC(mseq->seq[piLeafListR[i]], iMaxLen);
    }

    /* attach sequences to profiles (NULL-terminated pointer arrays) */
    for (i = 0; i < iLeafCountL; i++){
        ppcProfile1[i] = mseq->seq[piLeafListL[i]];
    }
    ppcProfile1[i] = NULL;
    for (i = 0; i < iLeafCountR; i++){
        ppcProfile2[i] = mseq->seq[piLeafListR[i]];
    }
    ppcProfile2[i] = NULL;

    /* remove terminal 'X' for single sequences:
     * it is a quirk of hhalign() to delete 2 individual sequences
     * if 2 terminal X's meet during alignment,
     * just replace (one of) them.
     * this can be undone at the end.
     * profiles -consisting of more than 1 sequence-
     * appear to be all-right.
     * there seems to be no problem with B's and Z's */
    if ( (1 == iLeafCountL) && (1 == iLeafCountR) ){
        if ( ('X' == ppcProfile1[0][0]) && ('X' == ppcProfile2[0][0]) ){
#define NOTX 'N'
            ppcProfile1[0][0] = NOTX; /* FIXME: arbitrary assignment */
            ppcProfile2[0][0] = NOTX; /* FIXME: arbitrary assignment */
        }
        if ( ('X' == ppcProfile1[0][iLenL-1]) && ('X' == ppcProfile2[0][iLenR-1]) ){
            ppcProfile1[0][iLenL-1] = NOTX; /* FIXME: arbitrary assignment */
            ppcProfile2[0][iLenR-1] = NOTX; /* FIXME: arbitrary assignment */
        }
    }

    /* obtain sequence weights, normalised to sum 1.0 per profile */
    if (NULL != pdSeqWeights){
        dWeight = 0.00;
        for (i = 0; i < iLeafCountL; i++){
            register double dAux = pdSeqWeights[piLeafListL[i]];
#ifndef NDEBUG
            if (dAux <= 0.00){
                Log(&rLog, LOG_DEBUG, "seq-weight %d = %f", piLeafListL[i], dAux);
            }
#endif
            pdWeightsL[i] = dAux;
            dWeight += dAux;
        } /* 0 <= i < iLeafCountL */
        dWeightInv = 1.00 / dWeight;
        for (i = 0; i < iLeafCountL; i++){
            pdWeightsL[i] *= dWeightInv;
        }

        dWeight = 0.00;
        for (i = 0; i < iLeafCountR; i++){
            register double dAux = pdSeqWeights[piLeafListR[i]];
#ifndef NDEBUG
            if (dAux <= 0.00){
                Log(&rLog, LOG_DEBUG, "seq-weight %d = %f", piLeafListR[i], dAux);
            }
#endif
            pdWeightsR[i] = dAux;
            dWeight += dAux;
        } /* 0 <= i < iLeafCountL */
        dWeightInv = 1.00 / dWeight;
        for (i = 0; i < iLeafCountR; i++){
            pdWeightsR[i] *= dWeightInv;
        }
    } /* (NULL != pdSeqWeights) */
    else {
        /* -1.00 signals "no weights available" to the caller/hhalign */
        pdWeightsL[0] = pdWeightsR[0] = -1.00;
    }

#if TRACE
    for (i = 0; i < iLeafCountL; i++){
        Log(&rLog, LOG_FORCED_DEBUG, "ppcProfile1[%d/%d] pointing to mseq %d = %s",
            i, iLeafCountR, piLeafListL[i], ppcProfile1[i]);
    }
    for (i = 0; i < iLeafCountR; i++){
        Log(&rLog, LOG_FORCED_DEBUG, "ppcProfile2[%d/%d] pointing to mseq %d = %s",
            i, iLeafCountR, piLeafListR[i], ppcProfile2[i]);
    }
#endif

    return;
} /*** end: PrepareAlignment() ***/

/**
 * @brief PosteriorProbabilities() calculates posterior probabilities
 * of aligning a single sequences on-to an alignment containing this sequence
 *
 * @param[in] prMSeq
 * holds the aligned sequences [in]
 * @param[in] rHMMalignment
 * HMM of the alignment in prMSeq
 * @param[in] rHhalignPara
 * various parameters read from commandline, needed by hhalign()
 * @param[in] pcPosteriorfile
 * name of file into which posterior probability information is written
 *
 * @return score of the alignment FIXME what is this?
 *
 * @note the PP-loop can be parallelised easily FIXME
 */
int PosteriorProbabilities(mseq_t *prMSeq, hmm_light rHMMalignment,
                           hhalign_para rHhalignPara, char *pcPosteriorfile)
{
    double dScore = 0.0;
    int i;
    int iS; /* iterator for sequences */
    int iNseq = prMSeq->nseqs; /* number of sequences */
    int iLenHMM = rHMMalignment.L; /* length of alignment */
    int iViterbiCount = 0; /* counts how often Viterbi is triggered */
    char **ppcCopy = NULL;
    char **ppcRepresent = NULL;
    char zcAux[10000] = {0};
    char zcError[10000] = {0};
    hhalign_scores *prHHscores = NULL;
    FILE *pfPP = NULL;

    /* NOTE(review): fopen() result is not checked; a bad path would
     * crash on the first fprintf -- confirm callers guarantee validity */
    pfPP = fopen(pcPosteriorfile, "w");
    fprintf(pfPP, "#1.i\t2.name\t\t3.L1\t4.L2\t5.sum\t\t6.sum/L1\t7.HH\n");

    prHHscores = CKCALLOC(iNseq, sizeof(hhalign_scores));
    for (iS = 0; iS < iNseq; iS++){
        memset(&(prHHscores[iS]), 0, sizeof(hhalign_scores));
    }
    ppcCopy = CKCALLOC(1, sizeof(char *));
    ppcCopy[0] = CKCALLOC(2*iLenHMM, sizeof(char));
    ppcRepresent = CKCALLOC(1, sizeof(char *));
    ppcRepresent[0] = CKCALLOC(2*iLenHMM, sizeof(char));
    /* consensus of the HMM; note the +1: HMM positions appear 1-based */
    for (i = 0; i < iLenHMM; i++){
        ppcRepresent[0][i] = rHMMalignment.seq[rHMMalignment.ncons][i+1];
    }

    /* FIXME: this loop can be parallelised, FS r288 */
    iViterbiCount = 0;
    for (iS = 0; iS < iNseq; iS++){

        /* single sequences may very well contain leading/trailing gaps,
         * this will trigger warning in Transfer:hhalignment-C.h */
        char *pcIter = NULL;
        /* strip leading gap characters before copying */
        for (pcIter = prMSeq->orig_seq[iS]; ('-' == *pcIter) || ('.' == *pcIter); pcIter++);
        strcpy(ppcCopy[0], pcIter/*prMSeq->orig_seq[iS]*/);
        /* strip trailing gap characters by terminating early */
        for (pcIter = &ppcCopy[0][strlen(ppcCopy[0])-1]; ('-' == *pcIter) || ('.' == *pcIter); pcIter--);
        pcIter++;
        *pcIter = '\0';

        zcError[0] = '\0';
        hhalign(ppcCopy, 1, NULL,
                ppcRepresent, 0, NULL,
                &dScore, &rHMMalignment, &rHMMalignment,
                NULL, NULL, NULL, NULL,
                rHhalignPara, &prHHscores[iS],
                0/* debug */, FALSE /* Log-Level*/,
                zcAux, zcError);
        if (NULL != strstr(zcError, "Viterbi")){
            iViterbiCount++;
        }
    } /* 0 <= iS < iNseq */
    Log(&rLog, LOG_INFO, "Viterbi algorithm triggered %d times (out of %d)",
        iViterbiCount, iNseq-1);

    for (iS = 0; iS < iNseq; iS++){
        fprintf(pfPP, "%d\t%10s\t%3d"
                , iS, prMSeq->sqinfo[iS].name, (int)(strlen(prMSeq->orig_seq[iS])));
        fprintf(pfPP, "\t%3d\t%f\t%f\t%f",
                prHHscores[iS].L, prHHscores[iS].sumPP,
                prHHscores[iS].sumPP/strlen(prMSeq->orig_seq[iS]), prHHscores[iS].hhScore);
        fprintf(pfPP, "\n");
    } /* 0 <= iS < iNseq */

    fclose(pfPP);
    pfPP = NULL;
    free(ppcRepresent[0]);
    ppcRepresent[0] = NULL;
    free(ppcCopy[0]);
    ppcCopy[0] = NULL;
    free(ppcRepresent);
    ppcRepresent = NULL;
    free(ppcCopy);
    ppcCopy = NULL;
    free(prHHscores);
    prHHscores = NULL;

    return 1;
} /* this is the end of PosteriorProbabilities() */

/**
 * @brief sequentially align (chain) sequences
 *
 * @param[in,out] prMSeq
 * holds the un-aligned sequences (in) and the final alignment (out)
 * @param[in] rHhalignPara
 * various parameters needed by hhalign()
 * @param[in] iClustersize
 * parameter that controls how often HMM is updated
 *
 * @note chained alignment takes much longer than balanced alignment
 * because at every step ClustalO has to scan all previously aligned residues.
 * for a balanced tree this takes N*log(N) time but for a chained tree
 * it takes N^2 time.
 * This function has a short-cut, that the HMM need not be updated
 * for every single alignment step, but the HMM from the previous
 * step(s) can be re-cycled. The HMM is updated (i) at the very first step,
 * (ii) if a gap has been inserted into the HMM during alignment or
 * (iii) if the HMM has been used for too many steps without having
 * been updated.
This update-frequency is controlled by the input * parameter iClustersize. iClustersize is the number of sequences used * to build a HMM to allow for one non-updating step. For example, * if iClustersize=100 and a HMM has been build from 100 sequences, * then this HMM can be used once without updating. If the HMM has * been built from 700 sequences (and iClustersize=100) then this * HMM can be used 7-times without having to be updated, etc. * For this reason the initial iClustersize sequences are always * aligned with fully updated HMMs. */ double PileUp(mseq_t *prMSeq, hhalign_para rHhalignPara, int iClustersize) { /* @<variables local to PileUp> */ int iI; /* iterator for sequences */ int iCountPro = 0; /* count how many sequences are already in profile */ int *piLeafListSeq = NULL; /* lists required by PrepareAlignment() for attaching sequences pointers to ppcProfileSeq */ int *piLeafListPro = NULL; /* -"- ppcProfilePro */ double *pdWeightL = NULL; /* argument used by hhalign, not used here */ double *pdWeightR = NULL; /* -"- */ double *pdWeightP = NULL; /* -"- */ char **ppcProfileSeq = NULL; /* pointer to sequences in profile */ char **ppcProfilePro = NULL; /* pointer to sequences in profile */ double dScore = -1.0; /* alignment score, calculated by hhalign() */ hhalign_scores rHHscores = {0}; /* more explicit scores than dScore */ hmm_light *prHMM = NULL; /* HMM against which to align single sequence */ hmm_light rHMMprofile = {0}; /* HMM calculated externally from profile */ int iHHret = -1; /* return value of hhalign() */ char zcAux[10000] = {0}; /* auxilliary print string used by hhalign() */ char zcError[10000] = {0}; /* error message printed by hhalign() */ char *pcConsensPro = NULL, /* auxilliary sequence strings required by hhalign() */ *pcReprsntPro = NULL, /* not used here */ *pcConsensSeq = NULL, *pcReprsntSeq = NULL; bool bHMM_stale = YES; /* flag if HMM has to be updated */ int iSinceLastUpdate = -1; /* counts how often HMM has been used since last 
update */ progress_t *prProgress; /* structure required for progress report */ bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE; /* progress report */ char **ppcReprsnt = NULL; /* string used to represent HMM */ ppcReprsnt = CKCALLOC(1, sizeof(char *)); /* weights are not really used, but hhalign() expects them */ pdWeightL = (double *)CKMALLOC(prMSeq->nseqs * sizeof(double)); pdWeightR = (double *)CKMALLOC(prMSeq->nseqs * sizeof(double)); pdWeightP = (double *)CKMALLOC(prMSeq->nseqs * sizeof(double)); /* lists used for attaching ppcProfileSeq/ppcProfilePro to prMSeq */ piLeafListSeq = (int *)CKMALLOC(1 * sizeof(int)); piLeafListPro = (int *)CKMALLOC(prMSeq->nseqs * sizeof(int)); ppcProfileSeq = (char **)CKMALLOC(1 * sizeof(char *)); ppcProfileSeq[0] = NULL; ppcProfilePro = (char **)CKMALLOC(prMSeq->nseqs * sizeof(char *)); for (iI = 0; iI < prMSeq->nseqs; iI++){ ppcProfilePro[iI] = NULL; } piLeafListPro[0] = 0; /* first sequences in profile is simply un-aligned sequence */ iCountPro = 1; /* 'profile' now contains 1 sequence */ NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Progressive alignment progress", bPrintCR); /* build up the initial alignment: * sequences are chained but hhalign() is used normally, * that is, one sequence is aligned to a profile */ for (iI = 1; iI < MIN(prMSeq->nseqs, iClustersize); iI++){ piLeafListSeq[0] = iI; /* PrepareAlignment() connects ppcProfileSeq/ppcProfilePro and prMSeq */ PrepareAlignment(prMSeq, ppcProfilePro, ppcProfileSeq, pdWeightL, pdWeightR, pdWeightP, iI, piLeafListPro, 1, piLeafListSeq); /* align 1 (5th argument) sequence to the growing profile * (2nd argument number of sequences is iI), * external HMM not used at this stage (prHMM=NULL) */ iHHret = hhalign(ppcProfilePro, iI, NULL/*pdWeightL*/, ppcProfileSeq, 1, NULL/*pdWeightR*/, &dScore, prHMM, prHMM, pcConsensPro, pcReprsntPro, pcConsensSeq, pcReprsntSeq, rHhalignPara, &rHHscores, CALL_FROM_REGULAR/* DEBUG ARGUMENT */, rLog.iLogLevelEnabled, 
zcAux, zcError); piLeafListPro[iI] = iI; iCountPro = iI+1; ProgressLog(prProgress, iI, prMSeq->nseqs-1, FALSE); /* FIXME: test! FS, r288 */ } /* 1 <= iI < MIN(prMSeq->nseqs, iClustersize) */ /* now align single sequences not to a profile but to a HMM of a potentially old profile */ while (iI < prMSeq->nseqs){ /* if HMM not up-to-date then create new HMM for profile: * HMM is stale (i) at the beginning, * (ii) if a gap has been inserted into the HMM during previous step, * (iii) too many steps after the last up-date */ if ( (YES == bHMM_stale) ){ FreeHMMstruct(&rHMMprofile); if (OK != AlnToHMM2(&rHMMprofile, rHhalignPara, prMSeq->seq, iI) ) { Log(&rLog, LOG_ERROR, "Couldn't convert alignment to HMM. Will not proceed with chained alignment, step %d", iI); } bHMM_stale = NO; iSinceLastUpdate = 0; } /* align single sequence to HMM */ piLeafListSeq[0] = iI; PrepareAlignment(prMSeq, ppcProfilePro, ppcProfileSeq, pdWeightL, pdWeightR, pdWeightP, iI, piLeafListPro, 1, piLeafListSeq); /* use representative sequence to track where gaps are inserted into HMM */ ppcReprsnt[0] = CKREALLOC(ppcReprsnt[0], strlen(ppcProfilePro[0])+strlen(ppcProfileSeq[0])+1); memcpy(ppcReprsnt[0], rHMMprofile.seq[rHMMprofile.ncons]+1, rHMMprofile.L); ppcReprsnt[0][rHMMprofile.L] = '\0'; /* align 1 (5th argument) sequence to a HMM (2nd argument number of sequences is '0', * not iI as for a profile or '1' for one representative sequence), * external HMM (rHMMprofile calculated by AlnToHMM2()) is used at this stage */ prHMM = &rHMMprofile; hhalign(ppcReprsnt, 0, NULL, ppcProfileSeq, 1, NULL, &dScore, prHMM, prHMM, pcConsensPro, pcReprsntPro, pcConsensSeq, pcReprsntSeq, rHhalignPara, &rHHscores, CALL_FROM_ALN_HMM/* DEBUG ARGUMENT */, rLog.iLogLevelEnabled, zcAux, zcError); iSinceLastUpdate++; /* HMM has been used one more time */ ProgressLog(prProgress, iI, prMSeq->nseqs-1, FALSE); /* FIXME: test! FS, r288 */ /* check if gaps have been inserted into HMM. 
* if this is the case then HMM is stale * and the profile used to create the HMM must get gaps at the same positions. * gaps are shown in the representative sequence */ if (rHMMprofile.L != strlen(ppcReprsnt[0])){ int iSeq; /* iterate through sequences of profile */ /* manually insert gaps into existing profile, * there should already be enough memory */ #ifdef HAVE_OPENMP /* all sequences in profile obtain gaps at the same position, * can do this in parallel */ #pragma omp parallel for #endif for (iSeq = 0; iSeq < iI; iSeq++){ int iPtrRep, /* points to 'residue' in representative sequence */ iPtrPrf; /* points to the corresponding position in the profile */ for (iPtrRep = strlen(ppcReprsnt[0])-1, iPtrPrf = strlen(ppcProfilePro[iSeq])-1; iPtrRep >= 0; iPtrRep--){ if ('-' == ppcReprsnt[0][iPtrRep]){ /* gaps are newly introduced into profile */ ppcProfilePro[iSeq][iPtrRep] = '-'; } else { /* non-gap characters (residues) are shifted towards the back */ ppcProfilePro[iSeq][iPtrRep] = ppcProfilePro[iSeq][iPtrPrf]; iPtrPrf--; } if (iPtrRep == iPtrPrf){ /* if profile and representative seq point to same position then no more gaps */ break; } } /* strlen(ppcReprsnt[0])-1 >= iPtrRep >= 0 */ ppcProfilePro[iSeq][strlen(ppcReprsnt[0])] = '\0'; } /* 0 <= iSeq < iI */ /* make HMM stale */ bHMM_stale = YES; } /* strlen(represent) != HMM.L (gaps inserted into HMM) */ /* check if maximum number of sequences exceeded */ if ( (iClustersize <= 1) || (iSinceLastUpdate > (double)(rHMMprofile.N_in)/(double)(iClustersize)) ){ bHMM_stale = YES; } piLeafListPro[iI] = iI; iCountPro = iI+1; iI++; } /* iI < prMSeq->nseqs */ ProgressDone(prProgress); FreeProgress(&prProgress); FreeHMMstruct(&rHMMprofile); CKFREE(pdWeightL); CKFREE(pdWeightR); CKFREE(pdWeightP); if (NULL != ppcReprsnt){ if (NULL != ppcReprsnt[0]){ CKFREE(ppcReprsnt[0]); } CKFREE(ppcReprsnt); } CKFREE(piLeafListSeq); CKFREE(piLeafListPro); return 0.0; } /* this is the end of PileUp() */ /** * @brief wrapper for hhalign. 
This is a frontend function to * the ported hhalign code. * * @param[in,out] prMSeq * holds the unaligned sequences [in] and the final alignment [out] * @param[in] piOrderLR * holds order in which sequences/profiles are to be aligned, * even elements specify left nodes, odd elements right nodes, * if even and odd are same then it is a leaf * @param[in] pdSeqWeights * Weight per sequence. No weights used if NULL * @param[in] iNodeCount * number of nodes in tree, piOrderLR has 2*iNodeCount elements * @param[in] prHMMList * List of background HMMs (transition/emission probabilities) * @param[in] iHMMCount * Number of input background HMMs * @param[in] iProfProfSeparator * Gives the number of sequences in the first profile, if in * profile/profile alignment mode (iNodeCount==3). That assumes mseqs * holds the sequences of profile 1 and profile 2. * @param[in] rHhalignPara * various parameters read from commandline * * @return score of the alignment FIXME what is this? * * @note complex function. could use some simplification, more and * documentation and a struct'uring of piOrderLR * * @note HHalignWrapper can be entered in 2 different ways: * (i) all sequences are un-aligned (ii) there are 2 (aligned) profiles. * in the un-aligned case (i) the sequences come straight from Squid, * that is, they have been sanitised, all non-alphabetic residues * have been rendered as X's. In profile mode (ii) one profile may * have been produced internally. In that case residues may have * been translated back into their 'native' form, that is, they * may contain un-sanitised residues. 
These will cause trouble * during alignment * * @note: introduced argument hhalign_para rHhalignPara; FS, r240 -> r241 * @note: if hhalign() fails then try with Viterbi by setting MAC-RAM=0; FS, r241 -> r243 */ double HHalignWrapper(mseq_t *prMSeq, int *piOrderLR, double *pdSeqWeights, int iNodeCount, hmm_light *prHMMList, int iHMMCount, int iProfProfSeparator, hhalign_para rHhalignPara) { int iN; /* node iterator */ int *piLeafCount = NULL; /* number of leaves beneath a certain node */ int **ppiLeafList = NULL; /* list of leaves beneath a certain node */ char **ppcProfile1 = NULL; /* pointer to sequences in profile */ char **ppcProfile2 = NULL; /* pointer to sequences in profile */ char *pcReprsnt1 = NULL; /* representative of HMM aligned to left */ char *pcReprsnt2 = NULL; /* representative of HMM aligned to right */ char **ppcReprsnt1 = &pcReprsnt1; /* representative of HMM aligned to L */ char **ppcReprsnt2 = &pcReprsnt2; /* representative of HMM aligned to R */ char *pcConsens1 = NULL; /* copy of left sequence */ char *pcConsens2 = NULL; /* copy of right sequence */ char **ppcCopy1 = /*&pcCopy1*/NULL; /* copy of left sequences */ char **ppcCopy2 = /*&pcCopy2*/NULL; /* copy of right sequences */ double *pdScores = NULL; /* alignment scores (seq/HMM) */ double dScore = 0.0; /* alignment score (seq/HMM) */ int iAux_FS = 0; char zcAux[10000] = {0}; char zcError[10000] = {0}; int i; /* aux */ progress_t *prProgress; int iAlnLen; /* alignment length */ double *pdWeightsL = NULL; /* sequence weights of left profile */ double *pdWeightsR = NULL; /* sequence weights of right profile */ int iMergeNodeCounter = 0; hmm_light *prHMM = NULL; hmm_light *prHMMleft = NULL; hmm_light *prHMMrght = NULL; hmm_light *prHMMnull = NULL; bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? 
FALSE : TRUE; #if TIMING char zcStopwatchMsg[1024]; Stopwatch_t *stopwatch = StopwatchCreate(); StopwatchZero(stopwatch); StopwatchStart(stopwatch); #endif hhalign_scores rHHscores = {0}; #if 0 /* DEVEL 291 */ if (NULL != prHMMList) { /* FIXME DEVEL 291: replace this outer test with iHMMCount>0*/ if (iHMMCount>1) { Log(&rLog, LOG_WARN, "FIXME: Using only first of %u HMMs (needs implementation)", iHMMCount); } prHMM = &(prHMMList[0]); /* FIXME DEVEL 291: check for NULL */ } else { /* FIXME: prHMM not allowed to be NULL and therefore pseudo allocated here */ prHMM = (hmm_light *) CKCALLOC(1, sizeof(hmm_light)); } #else prHMMnull = (hmm_light *) CKCALLOC(1, sizeof(hmm_light)); if ( (iHMMCount > 0) && (NULL != prHMMList) ){ prHMM = &(prHMMList[0]); if (iHMMCount > 1) { Log(&rLog, LOG_WARN, "FIXME: Using only first of %u HMMs (needs implementation)", iHMMCount); } } else { prHMM = prHMMnull; /* prHMM not allowed to be NULL and therefore assigned to pseudo allocated */ } #endif assert(NULL!=prMSeq); if (NULL==piOrderLR) { assert(3==iNodeCount); } SanitiseUnknown(prMSeq); /* hhalign was not made for DNA/RNA. So warn if sequences are not * protein */ if (SEQTYPE_PROTEIN != prMSeq->seqtype) { /*Log(&rLog, LOG_WARN, "%s alignment is still experimental.", SeqTypeToStr(prMSeq->seqtype));*/ if(prMSeq->seqtype == SEQTYPE_DNA) rHhalignPara.bIsDna = true; if(prMSeq->seqtype == SEQTYPE_RNA) rHhalignPara.bIsRna = true; } /* hhalign produces funny results if sequences contain gaps, so * dealign. 
Only way to use alignment info is to use it as a * background HMM */ if (TRUE == prMSeq->aligned) { Log(&rLog, LOG_DEBUG, "Dealigning aligned sequences (inside %s)", __FUNCTION__); DealignMSeq(prMSeq); } #if TRACE Log(&rLog, LOG_FORCED_DEBUG, "iNodeCount = %d", iNodeCount); #endif /* allocate top-level memory for leaf tracking arrays and profiles, * and sequence weights*/ piLeafCount = CKCALLOC(iNodeCount, sizeof(int)); ppiLeafList = CKCALLOC(iNodeCount, sizeof(int *)); ppcProfile1 = CKCALLOC(prMSeq->nseqs*2-1, sizeof(char *)); ppcProfile2 = CKCALLOC(prMSeq->nseqs*2-1, sizeof(char *)); pdScores = CKCALLOC(iNodeCount, sizeof(double)); pdWeightsL = CKCALLOC(iNodeCount, sizeof(double)); pdWeightsR = CKCALLOC(iNodeCount, sizeof(double)); NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Progressive alignment progress", bPrintCR); /* Profile-profile alignment? Then setup piLeafCount and * piLeafList here. FIXME this is just an awful haaaack */ if (iNodeCount==3 && NULL==piOrderLR) { int iSizeProf1 = iProfProfSeparator; int iSizeProf2 = prMSeq->nseqs - iProfProfSeparator; piLeafCount[0] = iSizeProf1; ppiLeafList[0] = (int *)CKMALLOC(iSizeProf1 * sizeof(int)); for (i=0;i<iSizeProf1;i++) { ppiLeafList[0][i] = i; } piLeafCount[1] = iSizeProf2; ppiLeafList[1] = (int *)CKMALLOC(iSizeProf2 * sizeof(int)); for (i=0;i<iSizeProf2;i++) { ppiLeafList[1][i] = i+iSizeProf1; } /* awful hack inside an awful hack: we had to setup piLeafCount * and piLeafList outside the node iteration. this which is * normally done at leaf level inside the node iteration. to * avoid overwriting the already setup vars set... 
*/ iNodeCount=1; piOrderLR = (int *)CKCALLOC(DIFF_NODE, sizeof(int)); piOrderLR[LEFT_NODE] = 0; piOrderLR[RGHT_NODE] = 1; piOrderLR[PRNT_NODE] = 2; } iMergeNodeCounter = 0; for (iN = 0; iN < iNodeCount; iN++){ register int riAux = DIFF_NODE * iN; /*LOG_DEBUG("node %d ", iN);*/ if (piOrderLR[riAux+LEFT_NODE] == piOrderLR[riAux+RGHT_NODE]){ register int riLeaf = piOrderLR[riAux+LEFT_NODE]; #if TRACE if (NULL == pdSeqWeights) { Log(&rLog, LOG_FORCED_DEBUG, "node %d is a leaf with entry %d (seq %s)", iN, riLeaf, prMSeq->sqinfo[riLeaf].name); } else { Log(&rLog, LOG_FORCED_DEBUG, "node %d is a leaf with entry %d (seq %s) and weight %f", iN, riLeaf, prMSeq->sqinfo[riLeaf].name, pdSeqWeights[riLeaf]); } #endif /* left/right entry same, this is a leaf */ piLeafCount[piOrderLR[riAux+PRNT_NODE]] = 1; /* number of leaves is '1' */ ppiLeafList[piOrderLR[riAux+PRNT_NODE]] = (int *)CKMALLOC(1 * sizeof(int)); ppiLeafList[piOrderLR[riAux+PRNT_NODE]][0] = riLeaf; } /* was a leaf */ else { int iL, iR, iP; /* ID of left/right nodes, parent */ int i, j; /* aux */ Log(&rLog, LOG_DEBUG, "merge profiles at node %d", iN, piOrderLR[riAux]); /* iNodeCount - prMSeq->nseqs = total # of merge-nodes * unless in profile/profile alignment mode */ if (1 == iNodeCount) { ProgressLog(prProgress, ++iMergeNodeCounter, 1, FALSE); } else { ProgressLog(prProgress, ++iMergeNodeCounter, iNodeCount - prMSeq->nseqs, FALSE); } /* left/right entry are not same, this is a merge node */ iL = piOrderLR[riAux+LEFT_NODE]; iR = piOrderLR[riAux+RGHT_NODE]; iP = piOrderLR[riAux+PRNT_NODE]; piLeafCount[iP] = piLeafCount[iL] + piLeafCount[iR]; ppiLeafList[iP] = (int *)CKMALLOC(piLeafCount[iP] * sizeof(int)); for (i = j = 0; i < piLeafCount[iL]; i++, j++){ ppiLeafList[iP][j] = ppiLeafList[iL][i]; } for (i = 0; i < piLeafCount[iR]; i++, j++){ ppiLeafList[iP][j] = ppiLeafList[iR][i]; } /* prepare simulation arena: * - make sure enough memory in sequences * - attach sequence pointers to profiles */ /* idea: switch template 
and query according to nseq? */ PrepareAlignment(prMSeq, ppcProfile1, ppcProfile2, pdWeightsL, pdWeightsR, pdSeqWeights, piLeafCount[iL], ppiLeafList[iL], piLeafCount[iR], ppiLeafList[iR]); if (rLog.iLogLevelEnabled <= LOG_DEBUG){ int i; FILE *fp = LogGetFP(&rLog, LOG_DEBUG); Log(&rLog, LOG_DEBUG, "merging profiles %d & %d", iL, iR); for (i = 0; i < piLeafCount[iL]; i++){ fprintf(fp, "L/#=%3d (ID=%3d, w=%f): %s\n", i, ppiLeafList[iL][i], pdWeightsL[i], ppcProfile1[i]); } for (i = 0; i < piLeafCount[iR]; i++){ fprintf(fp, "R/#=%3d (ID=%3d, w=%f): %s\n", i, ppiLeafList[iR][i], pdWeightsR[i], ppcProfile2[i]); } } #if 1 /* DEVEL 291 */ /* * Note: if there is a HMM-batch file, then prMSeq->ppiHMMBindex is not NULL; * ppiLeafList[iL/iR][0] is the 'lead' sequence in a profile; * prMSeq->ppiHMMBindex[ppiLeafList[iL][0]] are the HMMs associated with the lead sequence; * this could be NULL if there are no HMMs associated with this particular sequence * at the moment only 1st HMM can be used, prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0]; * the index of this HMM can be '-1' if the specified HMM file does not exist * Note: we only use prHMMleft/prHMMrght, even if global HMM (--hmm-in) is specified **/ if ( (NULL != prMSeq->ppiHMMBindex) && (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iL][0]]) && (prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] > -1) ){ prHMMleft = &(prHMMList[prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0]]); } else if (iHMMCount > 0){ prHMMleft = prHMM; } else { prHMMleft = prHMMnull; } if ( (NULL != prMSeq->ppiHMMBindex) && (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iR][0]]) && (prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0] > -1) ){ prHMMrght = &(prHMMList[prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0]]); } else if (iHMMCount > 0){ prHMMrght = prHMM; } else { prHMMrght = prHMMnull; } #endif /* align individual sequences to HMM; * - use representative sequence to get gapping * - create copies of both, individual/representative sequences * as we don't want to introduce gaps into 
original * * FIXME: representative sequence is crutch, should use * full HMM but that does not seem to work at all * -- try harder! Fail better! */ /*if ( (piLeafCount[iL] <= APPLY_BG_HMM_UP_TO_TREE_DEPTH) && (0 != prHMM->L) ){*/ if (0 != prHMMleft->L){ int i, j; pcReprsnt1 = CKCALLOC(prHMMleft->L+strlen(ppcProfile1[0])+1, sizeof(char)); for (i = 0; i < prHMMleft->L; i++){ pcReprsnt1[i] = prHMMleft->seq[prHMMleft->ncons][i+1]; } ppcCopy1 = CKCALLOC(piLeafCount[iL], sizeof(char *)); for (j = 0; j < piLeafCount[iL]; j++){ ppcCopy1[j] = CKCALLOC(prHMMleft->L+strlen(ppcProfile1[0])+1, sizeof(char)); for (i = 0; i < (int) strlen(ppcProfile1[0]); i++){ ppcCopy1[j][i] = ppcProfile1[j][i]; } } { /* the size of the elements in the forward/backward matrices depends very much on the lengths of the profiles _and_ in which position (1st/2nd) the longer/shorter profile/HMM is. the matrix elements can easily exceed the size of a (long?) double if the longer profile/HMM is associated with the query (q) and the shorter with the target (t). FIXME: however, pseudo-count adding may also depend on position, this is only really tested for the HMM being in the 1st position (q) MUST TEST THIS MORE THOROUGHLY this switch appears to be most easily (although unelegantly) effected here. Don't want to do it (upstairs) in PrepareAlignment() because it might jumble up the nodes. Don't want to do it in hhalign() either because ppcProfile1/2 and q/t may be used independently. 
FS, r236 -> r237 */ int iLenA = strlen(ppcCopy1[0]); int iLenH = prHMMleft->L; int iHHret = 0; if (iLenH < iLenA){ iHHret = hhalign(ppcReprsnt1, 0/* only one representative seq*/, NULL, ppcCopy1, piLeafCount[iL], pdWeightsL, &dScore, prHMMleft, prHMMleft, NULL, NULL, NULL, NULL, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); } else { iHHret = hhalign(ppcCopy1, piLeafCount[iL], pdWeightsL, ppcReprsnt1, 0/* only one representative seq*/, NULL, &dScore, prHMMleft, prHMMleft, NULL, NULL, NULL, NULL, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); } if ( (0 != iHHret) && (rLog.iLogLevelEnabled <= LOG_VERBOSE) ){ /* FS, r255 -> */ fprintf(stderr, "%s:%s:%d: (not essential) HMM pre-alignment failed, error %d, \n" "\t#=%d (len=%d), lead-seq=%s, len(HMM)=%d\n%s\nCARRY ON REGARDLESS\n", __FUNCTION__, __FILE__, __LINE__, iHHret, piLeafCount[iL], (int)strlen(ppcCopy1[0]), prMSeq->sqinfo[ppiLeafList[iL][0]].name, (int)strlen(ppcReprsnt1[0]), zcError); } } pdScores[ppiLeafList[iL][0]] = dScore; #if 0 printf("score: %f\nL: %s\nH: %s\n", dScore, ppcCopy1[0], ppcReprsnt1[0]); #endif /* assemble 'consensus'; * this is not a real consensus, it is more a gap indicator, * for each position it consists of residues/gaps in the 1st sequences, * or a residue (if any) of the other sequences. 
* it only contains a gap if all sequences of the profile * have a gap at this position */ pcConsens1 = CKCALLOC(prHMMleft->L+strlen(ppcProfile1[0])+1, sizeof(char)); for (i = 0; i < prHMMleft->L; i++){ for (j = 0, pcConsens1[i] = '-'; (j < piLeafCount[iL]) && ('-' == pcConsens1[i]); j++){ pcConsens1[i] = ppcCopy1[j][i]; } } #if 0 for (j = 0; (j < piLeafCount[iL]); j++){ printf("L%d:%s\n", j, ppcCopy1[j]); } printf("LC:%s\n", pcConsens1); #endif } /* ( (1 == piLeafCount[iL]) && (0 != prHMM->L) ) */ /*if ( (piLeafCount[iR] <= APPLY_BG_HMM_UP_TO_TREE_DEPTH) && (0 != prHMM->L) ){*/ if (0 != prHMMrght->L){ int i, j; pcReprsnt2 = CKCALLOC(prHMMrght->L+strlen(ppcProfile2[0])+1, sizeof(char)); for (i = 0; i < prHMMrght->L; i++){ pcReprsnt2[i] = prHMMrght->seq[prHMMrght->ncons][i+1]; } ppcCopy2 = CKCALLOC(piLeafCount[iR], sizeof(char *)); for (j = 0; j < piLeafCount[iR]; j++){ ppcCopy2[j] = CKCALLOC(prHMMrght->L+strlen(ppcProfile2[0])+1, sizeof(char)); for (i = 0; i < (int) strlen(ppcProfile2[0]); i++){ ppcCopy2[j][i] = ppcProfile2[j][i]; } } { /* the size of the elements in the forward/backward matrices depends very much on the lengths of the profiles _and_ in which position (1st/2nd) the longer/shorter profile/HMM is. the matrix elements can easily exceed the size of a (long?) double if the longer profile/HMM is associated with the query (q) and the shorter with the target (t). FIXME: however, pseudo-count adding may also depend on position, this is only really tested for the HMM being in the 1st position (q) MUST TEST THIS MORE THOROUGHLY this switch appears to be most easily (although unelegantly) effected here. Don't want to do it (upstairs) in PrepareAlignment() because it might jumble up the nodes. Don't want to do it in hhalign() either because ppcProfile1/2 and q/t may be used independently. 
FS, r236 -> r237 */ int iLenA = strlen(ppcCopy2[0]); int iLenH = prHMMrght->L; int iHHret = 0; if (iLenH < iLenA){ iHHret = hhalign(ppcReprsnt2, 0/* only one representative seq */, NULL, ppcCopy2, piLeafCount[iR], pdWeightsR, &dScore, prHMMrght, prHMMrght, NULL, NULL, NULL, NULL, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); } else { iHHret = hhalign(ppcCopy2, piLeafCount[iR], pdWeightsR, ppcReprsnt2, 0/* only one representative seq */, NULL, &dScore, prHMMrght, prHMMrght, NULL, NULL, NULL, NULL, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); } if ( (0 != iHHret) && (rLog.iLogLevelEnabled <= LOG_VERBOSE) ){ /* FS, r255 -> */ fprintf(stderr, "%s:%s:%d: (not essential) HMM pre-alignment failed, error %d, \n" "\t#=%d (len=%d), lead-seq=%s, len(HMM)=%d\n%s\nCARRY ON REGARDLESS\n", __FUNCTION__, __FILE__, __LINE__, iHHret, piLeafCount[iR], (int)strlen(ppcCopy2[0]), prMSeq->sqinfo[ppiLeafList[iR][0]].name, (int)strlen(ppcReprsnt2[0]), zcError); } } pdScores[ppiLeafList[iR][0]] = dScore; #if 0 printf("H: %s\nR: %s\nscore: %f\n", ppcReprsnt2[0], ppcCopy2[0], dScore); #endif /* assemble 'consensus'; * this is not a real consensus, it is more a gap indicator, * for each position it consists of residues/gaps in the 1st sequences, * or a residue (if any) of the other sequences. 
* it only contains a gap if all sequences of the profile * have a gap at this position */ pcConsens2 = CKCALLOC(prHMMrght->L+strlen(ppcProfile2[0])+1, sizeof(char)); for (i = 0; i < prHMMrght->L; i++){ for (j = 0, pcConsens2[i] = '-'; (j < piLeafCount[iR]) && ('-' == pcConsens2[i]); j++){ pcConsens2[i] = ppcCopy2[j][i]; } } #if 0 for (j = 0; (j < piLeafCount[iR]); j++){ printf("R%d:%s\n", j, ppcCopy2[j]); } printf("RC:%s\n", pcConsens2); #endif } /* ( (1 == piLeafCount[iR]) && (0 != prHMM->L) ) */ /* do alignment here (before free) */ { /* the size of the elements in the forward/backward matrices depends very much on the lengths of the profiles _and_ in which position (1st/2nd) the longer/shorter profile is. the matrix elements can easily exceed the size of a (long?) double if the longer profile is associated with the query (q) and the shorter with the target (t). this switch appears to be most easily (although unelegantly) effected here. Don't want to do it (upstairs) in PrepareAlignment() because it might jumble up the nodes. Don't want to do it in hhalign() either because ppcProfile1/2 and q/t may be used independently. 
FS, r228 -> 229 */ int iLen1 = strlen(ppcProfile1[0]); int iLen2 = strlen(ppcProfile2[0]); /* potential problem with empty profiles, FS, r249 -> r250 */ if ( (0 == iLen1) || (0 == iLen2) ){ Log(&rLog, LOG_FATAL, "strlen(prof1)=%d, strlen(prof2)=%d -- nothing to align\n", iLen1, iLen2); } if (iLen1 < iLen2){ int iHHret = 0; int iOldMacRam = rHhalignPara.iMacRamMB; iHHret = hhalign(ppcProfile1, piLeafCount[iL], pdWeightsL, ppcProfile2, piLeafCount[iR], pdWeightsR, &dScore, prHMMleft, prHMMrght, pcConsens1, pcReprsnt1, pcConsens2, pcReprsnt2, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); if (RETURN_OK != iHHret){ /* FS, r241 -> */ /*fprintf(stderr, "%s:%d: emergency EXIT\n", __FILE__, __LINE__); exit(-1);*/ fprintf(stderr, "%s:%s:%d: problem in alignment (profile sizes: %d + %d) (%s + %s), forcing Viterbi\n" "\thh-error-code=%d (mac-ram=%d)\n%s", __FUNCTION__, __FILE__, __LINE__, piLeafCount[iL], piLeafCount[iR], prMSeq->sqinfo[ppiLeafList[iL][0]].name, prMSeq->sqinfo[ppiLeafList[iR][0]].name, iHHret, rHhalignPara.iMacRamMB, zcError); /* at this stage hhalign() has failed, the only thing we can do (easily) is to re-run it in Viterbi mode, for this set MAC-RAM=0, set it back to its original value after 2nd try. FS, r241 -> r243 */ if (RETURN_FROM_MAC == iHHret){ /* Note: the default way to run hhalign() is to initially select MAC by giving it all the memory it needs. MAC may fail due to overflow (repeats?). alternatively, the problem may be (genuinely) too big for MAC. in thses cases it is legitimate to switch to Viterbi. However, selecting Viterbi from the outset is an abuse (abomination!), should this 1st invocation of Viterbi fail, then we (FS) will overrule the user and hammer the system with a massive memory request. (Jos 2:19) If anyone goes outside your house into the street, his blood will be on his own head; we will not be responsible. 
FS, r246 -> r247 */ rHhalignPara.iMacRamMB = 0; } else { rHhalignPara.iMacRamMB = REALLY_BIG_MEMORY_MB; } iHHret = hhalign(ppcProfile1, piLeafCount[iL], pdWeightsL, ppcProfile2, piLeafCount[iR], pdWeightsR, &dScore, prHMMleft, prHMMrght, pcConsens1, pcReprsnt1, pcConsens2, pcReprsnt2, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); if (RETURN_OK != iHHret){ /* at this stage hhalign() has failed twice, 1st time MAC, 2nd time Viterbi, don't know what to do else. FS, r241 -> r243 */ fprintf(stderr, "%s:%s:%d: problem in alignment, Viterbi did not work\n" "\thh-error-code=%d (mac-ram=%d)\n%s", __FUNCTION__, __FILE__, __LINE__, iHHret, rHhalignPara.iMacRamMB, zcError); Log(&rLog, LOG_FATAL, "could not perform alignment -- bailing out\n"); } else { fprintf(stderr, "%s:%s:%d: 2nd attempt worked", __FUNCTION__, __FILE__, __LINE__); } rHhalignPara.iMacRamMB = iOldMacRam; } /* 1st invocation failed */ } /* 1st profile was shorter than 2nd */ else { int iHHret = 0; int iOldMacRam = rHhalignPara.iMacRamMB; iHHret = hhalign(ppcProfile2, piLeafCount[iR], pdWeightsR, ppcProfile1, piLeafCount[iL], pdWeightsL, &dScore, prHMMrght, prHMMleft, pcConsens2, pcReprsnt2, pcConsens1, pcReprsnt1, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); if (RETURN_OK != iHHret){ /* FS, r241 -> r243 */ /*fprintf(stderr, "%s:%d: emergency EXIT\n", __FILE__, __LINE__); exit(-1);*/ fprintf(stderr, "%s:%s:%d: problem in alignment (profile sizes: %d + %d) (%s + %s), forcing Viterbi\n" "\thh-error-code=%d (mac-ram=%d)\n%s", __FUNCTION__, __FILE__, __LINE__, piLeafCount[iL], piLeafCount[iR], prMSeq->sqinfo[ppiLeafList[iL][0]].name, prMSeq->sqinfo[ppiLeafList[iR][0]].name, iHHret, rHhalignPara.iMacRamMB, zcError); /* at this stage hhalign() has failed, the only thing we can do (easily) is to re-run it in Viterbi mode, for this set MAC-RAM=0, set it back to its original value after 2nd try. 
FS, r241 -> r243 */ if (RETURN_FROM_MAC == iHHret){ /* see above */ rHhalignPara.iMacRamMB = 0; } else { rHhalignPara.iMacRamMB = REALLY_BIG_MEMORY_MB; } iHHret = hhalign(ppcProfile2, piLeafCount[iR], pdWeightsR, ppcProfile1, piLeafCount[iL], pdWeightsL, &dScore, prHMMrght, prHMMleft, pcConsens2, pcReprsnt2, pcConsens1, pcReprsnt1, rHhalignPara, &rHHscores, iAux_FS++, /* DEBUG ARGUMENT */ rLog.iLogLevelEnabled, zcAux, zcError); if (RETURN_OK != iHHret){ /* at this stage hhalign() has failed twice, 1st time MAC, 2nd time Viterbi, don't know what to do else. FS, r241 -> r243 */ fprintf(stderr, "%s:%s:%d: problem in alignment, Viterbi did not work\n" "\thh-error-code=%d (mac-ram=%d)\n%s", __FUNCTION__, __FILE__, __LINE__, iHHret, rHhalignPara.iMacRamMB, zcError); Log(&rLog, LOG_FATAL, "could not perform alignment -- bailing out\n"); } else { fprintf(stderr, "%s:%s:%d: 2nd attempt worked", __FUNCTION__, __FILE__, __LINE__); } rHhalignPara.iMacRamMB = iOldMacRam; } /* 1st invocation failed */ } /* 2nd profile was shorter than 1st */ /* * at this stage have performed alignment of 2 profiles/sequences. 
* if HMM batch information had been used then have choices: * (i) if HMM info only intended for initial alignment (of sequences) then make both HMMs stale; * (iia) if alignment of 2 profiles/sequences where same HMM used, then retain; * (iib) if alignment of 2 profiles/sequences where different HMMs used, then make both stale; * (iii) some majority voting */ #if 0 /* always make HMM batch stale (after 1st invocation) */ if ( (NULL != prMSeq->ppiHMMBindex) && (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iL][0]]) ){ prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] = -1; } if ( (NULL != prMSeq->ppiHMMBindex) && (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iR][0]]) ){ prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0] = -1; } #else /* retain HMMs if they were the same for both profiles */ if (NULL != prMSeq->ppiHMMBindex) { int i; if ( (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iL][0]]) && (NULL != prMSeq->ppiHMMBindex[ppiLeafList[iR][0]]) ){ if ( prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] == -1){ prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0] = -1; /* this is conservative, could do H[iL] = H[iR] */ for (i = 0; i < piLeafCount[iR]; i++){ prMSeq->ppiHMMBindex[ppiLeafList[iR][i]][0] = -1; } } else if ( prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0] == -1){ prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] = -1; /* this is conservative, could do H[iR] = H[iL] */ for (i = 0; i < piLeafCount[iL]; i++){ prMSeq->ppiHMMBindex[ppiLeafList[iL][i]][0] = -1; } } else if (prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] != prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0]){ prMSeq->ppiHMMBindex[ppiLeafList[iL][0]][0] = -1; /* this is NOT conservative, mandatory */ for (i = 0; i < piLeafCount[iL]; i++){ prMSeq->ppiHMMBindex[ppiLeafList[iL][i]][0] = -1; } prMSeq->ppiHMMBindex[ppiLeafList[iR][0]][0] = -1; /* this is NOT conservative, mandatory */ for (i = 0; i < piLeafCount[iR]; i++){ prMSeq->ppiHMMBindex[ppiLeafList[iR][i]][0] = -1; } } else { /* void, HMMs should be same */ } } } /* there was a HMM batch */ #endif if 
(rLog.iLogLevelEnabled <= LOG_DEBUG){ int i; printf("@@iL=%d, #(iL)=%d, iR=%d, #(iR)=%d\n", iL, piLeafCount[iL], iR, piLeafCount[iR]); for (i = 0; i < piLeafCount[iL]; i++){ char *pc = ppcProfile1[i]; printf("@@>%s\n", prMSeq->sqinfo[ppiLeafList[iL][i]].name); printf("@@"); while('\0' != *pc){ printf("%c", toupper(*pc)); pc++; } printf("\n"); } for (i = 0; i < piLeafCount[iR]; i++){ char *pc = ppcProfile2[i]; printf("@@>%s\n", prMSeq->sqinfo[ppiLeafList[iR][i]].name); printf("@@"); while('\0' != *pc){ printf("%c", toupper(*pc)); pc++; } printf("\n"); } printf("\n"); } /* LOG_DEBUG */ } /* free left/right node lists, * after alignment left/right profiles no longer needed */ if (NULL != ppcCopy1){ int i; for (i = 0; i < piLeafCount[iL]; i++){ CKFREE(ppcCopy1[i]); } CKFREE(ppcCopy1); CKFREE(pcReprsnt1); CKFREE(pcConsens1); } if (NULL != ppcCopy2){ int i; for (i = 0; i < piLeafCount[iR]; i++){ CKFREE(ppcCopy2[i]); } CKFREE(ppcCopy2); CKFREE(pcReprsnt2); CKFREE(pcConsens2); } ppiLeafList[iL] = CKFREE(ppiLeafList[iL]); ppiLeafList[iR] = CKFREE(ppiLeafList[iR]); piLeafCount[iL] = piLeafCount[iR] = 0; } /* was a merge node */ if (rLog.iLogLevelEnabled <= LOG_DEBUG){ int i, j; FILE *fp = LogGetFP(&rLog, LOG_DEBUG); for (i = 0; i < iNodeCount; i++){ if (0 == piLeafCount[i]){ continue; } fprintf(fp, "node %3d, #leaves=%d:\t", i, piLeafCount[i]); for (j = 0; ppiLeafList && (j < piLeafCount[i]); j++){ fprintf(fp, "%d,", ppiLeafList[i][j]); } fprintf(fp, "\n"); } } } /* 0 <= iN < iNodeCount */ ProgressDone(prProgress); /* check length and set length info */ iAlnLen = strlen(prMSeq->seq[0]); for (i=0; i<prMSeq->nseqs; i++) { #if 0 Log(&rLog, LOG_FORCED_DEBUG, "seq no %d: name %s; len %d; %s", i, prMSeq->sqinfo[i].name, strlen(prMSeq->seq[i]), prMSeq->seq[i]); #endif #ifndef NDEBUG assert(iAlnLen == strlen(prMSeq->seq[i])); #endif prMSeq->sqinfo[i].len = iAlnLen; } prMSeq->aligned = TRUE; if (rLog.iLogLevelEnabled <= LOG_DEBUG){ if (0 != prHMM->L){ int i; Log(&rLog, LOG_DEBUG, 
"Alignment scores with HMM:"); for (i = 0; /*pdScores[i] > 0.0*/i < prMSeq->nseqs; i++){ Log(&rLog, LOG_DEBUG, "%2d:\t%f\n", i, pdScores[i]); } } } /** translate back ambiguity residues * hhalign translates ambiguity codes (B,Z) into unknown residues (X). * as we still have the original input we can substitute them back */ TranslateUnknown2Ambiguity(prMSeq); ReAttachLeadingGaps(prMSeq, iProfProfSeparator); #if 0 /* DEVEL 291 */ if (NULL == prHMMList){ CKFREE(prHMM); } #else CKFREE(prHMMnull); #endif CKFREE(ppcProfile2); CKFREE(ppcProfile1); CKFREE(ppiLeafList[piOrderLR[DIFF_NODE*(iNodeCount-1)+PRNT_NODE]]); CKFREE(ppiLeafList); CKFREE(piLeafCount); CKFREE(pdScores); FreeProgress(&prProgress); CKFREE(pdWeightsL); CKFREE(pdWeightsR); #if TIMING StopwatchStop(stopwatch); StopwatchDisplay(stdout, "Total time for HHalignWrapper():" , stopwatch); StopwatchFree(stopwatch); #endif return dScore; /* FIXME alternative: return averaged pdScores */ } /*** end: HHalignWrapper() ***/
8598.c
// Tiled FDTD-2D stencil (Polybench kernel; machine-generated tiling preserved).
// Runs tmax time steps of the 2-D finite-difference time-domain method on an
// nx-by-ny grid: ex/ey are the electric-field components, hz the magnetic
// field, and _fict_[t] drives row 0 of ey at each step.  Row/column loops are
// blocked by 8 and each field update is parallelized across row blocks with
// OpenMP.  Loop indices are declared in-scope, so they are automatically
// private inside the parallel regions (no explicit private() clause needed).
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  for (int t = 0; t < tmax; t++) {
    /* source boundary condition on the first row of ey */
    for (int j = 0; j < ny; j++)
      ey[0][j] = _fict_[t];

    /* ey(i,j) -= 0.5 * d(hz)/di, over rows 1..nx-1 */
#pragma omp parallel for
    for (int ib = 1; ib < nx; ib += 8) {
      int imax = (ib + 7 < nx - 1) ? ib + 7 : nx - 1;
      for (int i = ib; i <= imax; i++) {
        for (int jb = 0; jb < ny; jb += 8) {
          int jmax = (jb + 7 < ny - 1) ? jb + 7 : ny - 1;
          for (int j = jb; j <= jmax; j++)
            ey[i][j] = ey[i][j] - 0.5 * (hz[i][j] - hz[i - 1][j]);
        }
      }
    }

    /* ex(i,j) -= 0.5 * d(hz)/dj, over columns 1..ny-1 */
#pragma omp parallel for
    for (int ib = 0; ib < nx; ib += 8) {
      int imax = (ib + 7 < nx - 1) ? ib + 7 : nx - 1;
      for (int i = ib; i <= imax; i++) {
        for (int jb = 1; jb < ny; jb += 8) {
          int jmax = (jb + 7 < ny - 1) ? jb + 7 : ny - 1;
          for (int j = jb; j <= jmax; j++)
            ex[i][j] = ex[i][j] - 0.5 * (hz[i][j] - hz[i][j - 1]);
        }
      }
    }

    /* hz update over the interior (0..nx-2) x (0..ny-2);
       0.69999999999999996 is the exact double nearest 0.7, kept verbatim */
#pragma omp parallel for
    for (int ib = 0; ib < nx - 1; ib += 8) {
      int imax = (ib + 7 < nx - 2) ? ib + 7 : nx - 2;
      for (int i = ib; i <= imax; i++) {
        for (int jb = 0; jb < ny - 1; jb += 8) {
          int jmax = (jb + 7 < ny - 2) ? jb + 7 : ny - 2;
          for (int j = jb; j <= jmax; j++)
            hz[i][j] = hz[i][j] - 0.69999999999999996 * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]);
        }
      }
    }
  }
}
facedetectcnn.h
/* By downloading, copying, installing or using the software you agree to this license. If you do not agree to this license, do not download, install, copy or use the software. License Agreement For libfacedetection (3-clause BSD License) Copyright (c) 2018-2021, Shiqi Yu, all rights reserved. shiqi.yu@gmail.com Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall copyright holders or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
*/ #pragma once #include "facedetection_export.h" //#define _ENABLE_AVX512 //Please enable it if X64 CPU //#define _ENABLE_AVX2 //Please enable it if X64 CPU //#define _ENABLE_NEON //Please enable it if ARM CPU FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!! unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image! /* DO NOT EDIT the following code if you don't really understand it. */ #if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2) #include <immintrin.h> #endif #if defined(_ENABLE_NEON) #include "arm_neon.h" //NEON does not support UINT8*INT8 dot product //to conver the input data to range [0, 127], //and then use INT8*INT8 dot product #define _MAX_UINT8_VALUE 127 #else #define _MAX_UINT8_VALUE 255 #endif #if defined(_ENABLE_AVX512) #define _MALLOC_ALIGN 512 #elif defined(_ENABLE_AVX2) #define _MALLOC_ALIGN 256 #else #define _MALLOC_ALIGN 128 #endif #if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON) #error Cannot enable the two of AVX512 and NEON at the same time. #endif #if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON) #error Cannot enable the two of AVX and NEON at the same time. #endif #if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2) #error Cannot enable the two of AVX512 and AVX2 at the same time. #endif #if defined(_OPENMP) #include <omp.h> #endif #include "string.h" #include "vector" #include "iostream" #include "typeinfo" using namespace std; void* myAlloc(size_t size); void myFree_(void* ptr); #define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0); #ifndef MIN # define MIN(a,b) ((a) > (b) ? (b) : (a)) #endif #ifndef MAX # define MAX(a,b) ((a) < (b) ? 
(b) : (a)) #endif typedef struct FaceRect_ { float score; int x; int y; int w; int h; int lm[10]; }FaceRect; typedef struct ConvInfoStruct_ { int channels; int num_filters; bool is_depthwise; bool is_pointwise; bool with_relu; float* pWeights; float* pBiases; }ConvInfoStruct; template <typename T> class CDataBlob { public: T * data; int rows; int cols; int channels; //in element int channelStep; //in byte public: CDataBlob() { data = 0; rows = 0; cols = 0; channels = 0; channelStep = 0; } CDataBlob(int r, int c, int ch) { data = 0; create(r, c, ch); //#warning "confirm later" //setZero(); } ~CDataBlob() { setNULL(); } void setNULL() { if (data) myFree(&data); rows = cols = channels = channelStep = 0; } void setZero() { if(data) memset(data, 0, channelStep * rows * cols); } inline bool isEmpty() { return (rows <= 0 || cols <= 0 || channels == 0 || data == NULL); } bool create(int r, int c, int ch) { setNULL(); rows = r; cols = c; channels = ch; //alloc space for int8 array int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8); if (remBytes == 0) this->channelStep = channels * sizeof(T); else this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes; data = (T*)myAlloc(size_t(rows) * cols * this->channelStep); if (data == NULL) { cerr << "Failed to alloc memeory for uint8 data blob: " << rows << "*" << cols << "*" << channels << endl; return false; } //memset(data, 0, width * height * channelStep); //the following code is faster than memset //but not only the padding bytes are set to zero. //BE CAREFUL!!! 
//#if defined(_OPENMP) //#pragma omp parallel for //#endif // for (int r = 0; r < this->rows; r++) // { // for (int c = 0; c < this->cols; c++) // { // int pixel_end = this->channelStep / sizeof(T); // T * pI = this->ptr(r, c); // for (int ch = this->channels; ch < pixel_end; ch++) // pI[ch] = 0; // } // } return true; } inline T * ptr(int r, int c) { if( r < 0 || r >= this->rows || c < 0 || c >= this->cols ) return NULL; return (this->data + (size_t(r) * this->cols + c) * this->channelStep /sizeof(T)); } bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep) { if (imgData == NULL) { cerr << "The input image data is null." << endl; return false; } if (typeid(float) != typeid(T)) { cerr << "DataBlob must be float in the current version." << endl; return false; } if (imgChannels != 3) { cerr << "The input image must be a 3-channel RGB image." << endl; return false; } //only 27 elements used for each pixel create((imgHeight+1)/2, (imgWidth+1)/2, 32); //since the pixel assignment cannot fill all the elements in the blob. 
//some elements in the blob should be initialized to 0 setZero(); #if defined(_OPENMP) #pragma omp parallel for #endif for (int r = 0; r < this->rows; r++) { for (int c = 0; c < this->cols; c++) { T * pData = this->ptr(r, c); for (int fy = -1; fy <= 1; fy++) { int srcy = r * 2 + fy; if (srcy < 0 || srcy >= imgHeight) //out of the range of the image continue; for (int fx = -1; fx <= 1; fx++) { int srcx = c * 2 + fx; if (srcx < 0 || srcx >= imgWidth) //out of the range of the image continue; const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx; //int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image int output_channel_offset = ((fy + 1) * 3 + fx + 1) ; //3x3 filters, 3-channel image pData[output_channel_offset] = (pImgData[0]); pData[output_channel_offset+9] = (pImgData[1]); pData[output_channel_offset+18] = (pImgData[2]); } } } } return true; } inline T getElement(int r, int c, int ch) { if (this->data) { if (r >= 0 && r < this->rows && c >= 0 && c < this->cols && ch >= 0 && ch < this->channels) { T * p = this->ptr(r, c); return (p[ch]); } } return (T)(0); } friend ostream &operator<<(ostream &output, CDataBlob &dataBlob) { output << "DataBlob Size (channels, rows, cols) = (" << dataBlob.channels << ", " << dataBlob.rows << ", " << dataBlob.cols << ")" << endl; if( dataBlob.rows * dataBlob.cols * dataBlob.channels <= 16) { //print the elements only when the total number is less than 64 for (int ch = 0; ch < dataBlob.channels; ch++) { output << "Channel " << ch << ": " << endl; for (int r = 0; r < dataBlob.rows; r++) { output << "("; for (int c = 0; c < dataBlob.cols; c++) { T * p = dataBlob.ptr(r, c); if(sizeof(T)<4) output << (int)(p[ch]); else output << p[ch]; if (c != dataBlob.cols - 1) output << ", "; } output << ")" << endl; } } } else output << "(" << dataBlob.getElement(0,0,0) << ", ..., " << dataBlob.getElement(dataBlob.rows-1, dataBlob.cols-1, dataBlob.channels-1) << endl; return 
output; } }; template <typename T> class Filters{ public: int channels; int num_filters; bool is_depthwise; bool is_pointwise; bool with_relu; CDataBlob<T> weights; CDataBlob<T> biases; Filters() { channels = 0; num_filters = 0; is_depthwise = false; is_pointwise = false; with_relu = true; } //bool init(ConvInfoStruct * pInfo) Filters & operator=(ConvInfoStruct & convinfo) { if (typeid(float) != typeid(T)) { cerr << "The data type must be float in this version." << endl; return *this; } if (typeid(float*) != typeid(convinfo.pWeights) || typeid(float*) != typeid(convinfo.pBiases)) { cerr << "The data type of the filter parameters must be float in this version." << endl; return *this; } this->channels = convinfo.channels; this->num_filters = convinfo.num_filters; this->is_depthwise = convinfo.is_depthwise; this->is_pointwise = convinfo.is_pointwise; this->with_relu = convinfo.with_relu; if(!this->is_depthwise && this->is_pointwise) //1x1 point wise { this->weights.create(1, num_filters, channels); } else if(this->is_depthwise && !this->is_pointwise) //3x3 depth wise { this->weights.create(1, 9, channels); } else { cerr << "Unsupported filter type. Only 1x1 point-wise and 3x3 depth-wise are supported." 
<< endl; return *this; } this->biases.create(1, 1, num_filters); //the format of convinfo.pWeights/biases must meet the format in this->weigths/biases for(int fidx = 0; fidx < this->weights.cols; fidx++) memcpy(this->weights.ptr(0,fidx), convinfo.pWeights + channels * fidx , channels * sizeof(T)); memcpy(this->biases.ptr(0,0), convinfo.pBiases, sizeof(T) * this->num_filters); return *this; } }; bool convolution(CDataBlob<float> & inputData, Filters<float> & filters, CDataBlob<float> & outputData, bool do_relu = true); bool convolutionDP(CDataBlob<float> & inputData, Filters<float> & filtersP, Filters<float> & filtersD, CDataBlob<float> & outputData, bool do_relu = true); bool convolution4layerUnit(CDataBlob<float> & inputData, Filters<float> & filtersP1, Filters<float> & filtersD1, Filters<float> & filtersP2, Filters<float> & filtersD2, CDataBlob<float> & outputData, bool do_relu = true); bool maxpooling2x2S2(CDataBlob<float> &inputData, CDataBlob<float> &outputData); template<typename T> bool extract(CDataBlob<T> &inputData, CDataBlob<T> &loc, CDataBlob<T> &conf, CDataBlob<T> &iou, int num_priors); template<typename T> bool concat4(CDataBlob<T> &inputData1, CDataBlob<T> &inputData2, CDataBlob<T> &inputData3, CDataBlob<T> &inputData4, CDataBlob<T> &outputData); bool priorbox( int feature_width, int feature_height, int img_width, int img_height, int step, int num_sizes, float * pWinSizes, CDataBlob<float> & outputData); bool softmax1vector2class(CDataBlob<float> &inputOutputData); /* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */ template<typename T> bool blob2vector(CDataBlob<T> &inputData, CDataBlob<T> & outputData); bool softmax1vector2class(CDataBlob<float> &inputOutputData); bool clamp1vector(CDataBlob<float> &inputOutputData); bool detection_output(CDataBlob<float> & priorbox, CDataBlob<float> & loc, CDataBlob<float> & conf, CDataBlob<float> & iou, float overlap_threshold, float confidence_threshold, int 
top_k, int keep_top_k, CDataBlob<float> & outputData); vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
arch.h
/*!
 * Software SPAMS v2.5 - Copyright 2009-2014 Julien Mairal
 *
 * This file is part of SPAMS.
 *
 * SPAMS is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * SPAMS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * \file
 *                toolbox arch
 *
 *                by Yuansi Chen
 *                yuansi.chen@berkeley.edu
 *
 *                File arch.h
 * \brief Contains archetypal analysis algorithms
 * It requires the toolbox linalg
 */

// This file was written by Yuansi Chen
#ifndef ARCH_H
#define ARCH_H

#include <utils.h>
#include "lsqsplx.h"
#include "projsplx.h"

// Selects the in-place residual-update variant of the inner loops below.
#define NEW_VERSION

/* **************************
 * Alternating Archetypal Analysis
 * **************************/
/// Alternating Minimization
/// Each sub-quadratic programming is solved by ActiveSet Method
// Decomposes X ~= Z * Alpha with Z = X * Beta, columns of Alpha and Beta on
// the simplex.  I1 FISTA warm-up passes, then I2 active-set passes.
// NOTE(review): the declared defaults use 10e-5 (== 1e-4), not 1e-5 -- kept
// as written; confirm against the SPAMS release before changing.
template <typename T>
void arch(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
          SpMatrix<T>& A, SpMatrix<T>& B,
          const int I1 = 3, const int I2 = 20,
          const T lambda2 = T(10e-5), const T epsilon = T(10e-5),
          const bool computeZtZ = true);

// Robust variant: residual columns are reweighted through a Huber-like
// function with knee epsilon2.
template <typename T>
void archRobust(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
                SpMatrix<T>& A, SpMatrix<T>& B,
                const int I1 = 3, const int I2 = 20,
                const T lambda2 = T(10e-5), const T epsilon = T(10e-5),
                const T epsilon2 = T(10e-3), const bool computeZtZ = true);

/// General functions including previous ones. Less parameters and simple use, for Python and Matlab interface
template <typename T>
void archetypalAnalysis(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
                        SpMatrix<T>& A, SpMatrix<T>& B,
                        const bool robust =false, const T epsilon2 = T(10e-3),
                        const bool computeXtX = false,
                        const int stepsFISTA = 5, const int stepsAS = 50,
                        const int numThreads=-1);

// Same, but builds the initial archetypes Z0 internally (random or cyclic).
template <typename T>
void archetypalAnalysis(const Matrix<T>& X, Matrix<T>& Z,
                        SpMatrix<T>& A, SpMatrix<T>& B,
                        const bool robust = false, const T epsilon2 = T(10e-3),
                        const bool computeXtX = false,
                        const int stepsFISTA = 5, const int stepsAS = 50,
                        const bool randominit = true,
                        const int numThreads=-1);

// Projects each column of X onto the simplex spanned by the columns of Z.
template <typename T>
void decompSimplex(const Matrix<T>& X, const Matrix<T>& Z, SpMatrix<T>& alpha,
                   const bool computerZtZ = false, const int numThreads=-1);

/* **************************
 * Implementations
 * **************************/
template <typename T>
void arch(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
          SpMatrix<T>& A, SpMatrix<T>& B,
          const int I1, const int I2,
          const T lambda2, const T epsilon, const bool computeXtX) {
   const int m = X.m();
   const int n = X.n();
   const int p = Z0.n();
   Z.copy(Z0);
   Matrix<T> AlphaT(p,n);        // Alpha^T: one simplex coefficient column per sample
   Matrix<T> BetaT(n,p);         // Beta^T: archetypes as convex combinations of samples
   T RSS = -1.0;                 // residual sum of squares, reported each pass
   Vector<T> refColZ;
   Vector<T> copRowAlphaT;
   Vector<T> refColBetaT;
   Matrix<T> matRSS(m,n);        // residual matrix X - Z*AlphaT
   Vector<T> vBarre(m);
   Vector<T> norms;
   cout.precision(8);
   // Phase 1: I1 rounds using FISTA for both sub-problems (cheap warm-up).
   for(int t=0; t<I1; ++t) {
      // step 1: fix Z to compute Alpha
#pragma omp parallel for
      for(int i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         gpFISTAFor(Z,refColX, refColAlphaT, T(1.0), T(1.0/0.7), 50, true);
      }
      // step 2: fix Alpha, fix all but one to compute Zi
      Vector<T> refColX;
      for(int l=0; l<p; ++l) {
         AlphaT.copyRow(l, copRowAlphaT);
         T sumAsq = copRowAlphaT.nrm2sq();
         Z.refCol(l, refColZ);
         // matRSS = X- Z*AlphaT
         matRSS.copy(X);
         Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
         if(sumAsq < T(10e-8)) {
            // singular: archetype l is unused; reset it to the worst-fit sample
            matRSS.norm_2_cols(norms);
            int k = norms.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            // add archetype l's contribution back, then solve for it alone
            matRSS.rank1Update(refColZ, copRowAlphaT);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            gpFISTAFor(X, vBarre, refColBetaT, T(1.0), T(1.0/0.7), 50, true);
            X.mult(refColBetaT, refColZ);
         }
      }
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      RSS = matRSS.normFsq();
      cout << "RSS FISTA = " << RSS << endl;
      flush(cout);
   }
   // Phase 2: I2 rounds using the active-set solver (exact sub-problems).
   for(int t=0; t<I2; ++t) {
      Matrix<T> G;
      if (computeXtX) {
         // precompute the regularized Gram matrix Z'Z once per pass
         Z.XtX(G);
         G.addDiag(lambda2*lambda2);
      }
      // step 1: fix Z to compute Alpha
#pragma omp parallel for
      for(int i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         if (computeXtX) {
            activeSetS<T>(Z,refColX, refColAlphaT, G, lambda2, epsilon);
         } else {
            activeSet<T>(Z,refColX, refColAlphaT, lambda2, epsilon);
         }
      }
      // step 2: fix Alpha, fix all but one to compute Zi
#ifdef NEW_VERSION
      // new version: keep one residual matrix and patch it in place with
      // rank-1 updates instead of recomputing it for every archetype
      Vector<T> refColX;
      Vector<T> tmp;
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      for(int l=0; l<p; ++l) {
         AlphaT.copyRow(l, copRowAlphaT);
         T sumAsq = copRowAlphaT.nrm2sq();
         Z.refCol(l, refColZ);
         // matRSS = X- Z*AlphaT
         if(sumAsq < T(10e-8)) {
            // singular: replace dead archetype with the worst-fit sample
            matRSS.norm_2_cols(norms);
            int k = norms.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            //matRSS.rank1Update(refColZ, copRowAlphaT);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            vBarre.add(refColZ);
            tmp.copy(refColZ);   // remember old Z_l to patch the residual below
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            activeSet<T>(X, vBarre, refColBetaT, lambda2, epsilon);
            X.mult(refColBetaT, refColZ);
            tmp.sub(refColZ);
            matRSS.rank1Update(tmp, copRowAlphaT);  // residual += (oldZ-newZ)*alpha_l'
         }
      }
#else
      // end new version (old variant: recompute the residual per archetype)
      Vector<T> refColX;
      for(int l=0; l<p; ++l) {
         AlphaT.copyRow(l, copRowAlphaT);
         T sumAsq = copRowAlphaT.nrm2sq();
         Z.refCol(l, refColZ);
         // matRSS = X- Z*AlphaT
         matRSS.copy(X);
         Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
         if(sumAsq < T(10e-8)) {
            // singular
            matRSS.norm_2_cols(norms);
            int k = norms.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            matRSS.rank1Update(refColZ, copRowAlphaT);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            activeSet<T>(X, vBarre, refColBetaT, lambda2, epsilon);
            X.mult(refColBetaT, refColZ);
         }
      }
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
#endif
      RSS = matRSS.normFsq();
      cout << "RSS AS = " << RSS << endl;
      flush(cout);
   }
   AlphaT.toSparse(A);
   BetaT.toSparse(B);
}

template <typename T>
void archRobust(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
                SpMatrix<T>& A, SpMatrix<T>& B,
                const int I1, const int I2,
                const T lambda2, const T epsilon, const T epsilon2,
                const bool computeXtX) {
   const int m = X.m();
   const int n = X.n();
   const int p = Z0.n();
   Z.copy(Z0);
   Matrix<T> AlphaT(p,n);
   Matrix<T> BetaT(n,p);
   T RSN = -1.0;                 // robust (Huber-weighted) residual norm
   Vector<T> refColZ;
   Vector<T> copRowAlphaT;
   Vector<T> refColBetaT;
   Matrix<T> matRSS(m,n);
   Vector<T> vBarre(m);
   Vector<T> norms;
   cout.precision(8);
   // Phase 1: I1 FISTA passes with per-column robust reweighting.
   for(int t=0; t<I1; ++t) {
      // step 1: fix Z to compute Alpha
#pragma omp parallel for
      for(int i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         gpFISTAFor(Z, refColX, refColAlphaT, T(1.0), T(1.0/0.7), 10, true);
      }
      // update scale factors: norms[i] = sqrt(max(||residual_i||, epsilon2))
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      matRSS.norm_2_cols(norms);
      norms.thrsmax(epsilon2);
      norms.Sqrt();
      Vector<T> refColX;
      // step 2: fix Alpha, fix all but one to compute Zi
      for(int l=0; l<p; ++l) {
         Z.refCol(l, refColZ);
         AlphaT.copyRow(l, copRowAlphaT);
         copRowAlphaT.div(norms);
         T sumAsq = copRowAlphaT.nrm2sq();
         matRSS.copy(X);
         Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
         if(sumAsq < T(10e-8)) {
            // singular: reset archetype l to the worst-fit sample
            // (note: this reuses 'norms', overwriting the scale factors)
            matRSS.norm_2_cols(norms);
            int k = norms.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            // absorbe the weights by rowAlphaT (second division -> weights squared)
            copRowAlphaT.div(norms);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            vBarre.add(refColZ);
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            gpFISTAFor(X, vBarre, refColBetaT, T(1.0), T(1.0/0.7), 10, true);
            X.mult(refColBetaT, refColZ);
         }
      }
      // report the Huber-smoothed robust objective
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      matRSS.norm_2_cols(norms);
      for (int i=0; i<norms.n(); ++i)
         if (norms[i] <= epsilon2)
            norms[i]=norms[i]*norms[i]/(2*epsilon2) + epsilon2/2;
      RSN = norms.sum();
      cout << "RSN FISTA= " << RSN << endl;
      flush(cout);
   }
   // Phase 2: I2 active-set passes with robust reweighting.
   for(int t=0; t<I2; ++t) {
      Matrix<T> G;
      if (computeXtX) {
         Z.XtX(G);
         G.addDiag(lambda2*lambda2);
      }
      // step 1: fix Z to compute Alpha
#pragma omp parallel for
      for(int i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         if (computeXtX) {
            activeSetS<T>(Z,refColX, refColAlphaT, G, lambda2, epsilon);
         } else {
            activeSet<T>(Z,refColX, refColAlphaT, lambda2, epsilon);
         }
      }
      // update scale factors
#ifndef NEW_VERSION
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      matRSS.norm_2_cols(norms);
      norms.thrsmax(epsilon2);
      norms.Sqrt();
      // step 2: fix Alpha, fix all but one to compute Zi
      Vector<T> refColX;
      for(int l=0; l<p; ++l) {
         Z.refCol(l, refColZ);
         AlphaT.copyRow(l, copRowAlphaT);
         copRowAlphaT.div(norms);
         T sumAsq = copRowAlphaT.nrm2sq();
         matRSS.copy(X);
         Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
         if(sumAsq < T(10e-8)) {
            // singular
            matRSS.norm_2_cols(norms);
            int k = norms.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            // absorbe the weights by rowAlphaT
            copRowAlphaT.div(norms);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            vBarre.add(refColZ);
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            activeSet<T>(X, vBarre, refColBetaT, lambda2, epsilon);
            X.mult(refColBetaT, refColZ);
         }
      }
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
#else
      /// new version: in-place residual maintenance via rank-1 updates
      Vector<T> refColX;
      Vector<T> tmp;
      Vector<T> tmp2;
      matRSS.copy(X);
      Z.mult(AlphaT, matRSS, false, false, T(-1.0), T(1.0));
      matRSS.norm_2_cols(norms);
      norms.thrsmax(epsilon2);
      norms.Sqrt();
      for(int l=0; l<p; ++l) {
         Z.refCol(l, refColZ);
         AlphaT.copyRow(l, copRowAlphaT);
         tmp2.copy(copRowAlphaT);    // keep the unweighted row for the residual patch
         copRowAlphaT.div(norms);
         T sumAsq = copRowAlphaT.nrm2sq();
         if(sumAsq < T(10e-8)) {
            // singular ('tmp' used here so 'norms' keeps the scale factors)
            matRSS.norm_2_cols(tmp);
            int k = tmp.max();
            X.refCol(k, refColX);
            refColZ.copy(refColX);
         } else {
            // absorbe the weights by rowAlphaT
            copRowAlphaT.div(norms);
            matRSS.mult(copRowAlphaT, vBarre, 1/sumAsq, T());
            vBarre.add(refColZ);
            tmp.copy(refColZ);       // old Z_l for the residual patch
            // least square to get Beta
            BetaT.refCol(l, refColBetaT);
            activeSet<T>(X, vBarre, refColBetaT, lambda2, epsilon);
            X.mult(refColBetaT, refColZ);
            tmp.sub(refColZ);
            matRSS.rank1Update(tmp,tmp2);  // residual += (oldZ-newZ)*alpha_l'
         }
      }
#endif
      /// end new version
      // report the Huber-smoothed robust objective
      matRSS.norm_2_cols(norms);
      for (int i=0; i<norms.n(); ++i)
         if (norms[i] <= epsilon2)
            norms[i]=norms[i]*norms[i]/(2*epsilon2) + epsilon2/2;
      RSN = norms.sum();
      cout << "RSN AS= " << RSN << endl;
      flush(cout);
   }
   AlphaT.toSparse(A);
   BetaT.toSparse(B);
}

template <typename T>
void archetypalAnalysis(const Matrix<T>& X, const Matrix<T>& Z0, Matrix<T>& Z,
                        SpMatrix<T>& A, SpMatrix<T>& B,
                        const bool robust, const T epsilon2,
                        const bool computeXtX,
                        const int stepsFISTA, const int stepsAS,
                        const int numThreads) {
   init_omp(numThreads);
   const T epsilon = 1e-5;
   const T lambda2 = 1e-5;
   // NOTE(review): the callees declare (..., lambda2, epsilon, ...) but the
   // calls below pass (epsilon, lambda2).  Both constants are 1e-5 here so
   // behavior is unaffected, but confirm the intended order upstream before
   // ever giving them different values.
   if (!robust) {
      arch(X, Z0, Z, A, B, stepsFISTA, stepsAS, epsilon,lambda2,computeXtX);
   } else {
      archRobust(X, Z0, Z, A, B, stepsFISTA, stepsAS, epsilon,lambda2,epsilon2,computeXtX);
   }
}

template <typename T>
void archetypalAnalysis(const Matrix<T>& X, Matrix<T>& Z,
                        SpMatrix<T>& A, SpMatrix<T>& B,
                        const bool robust, const T epsilon2,
                        const bool computeXtX,
                        const int stepsFISTA, const int stepsAS,
                        const bool randominit,
                        const int numThreads) {
   const int m = X.m();
   const int n = X.n();
   const int p = Z.n();
   Matrix<T> Z0(m,p);
   Vector<T> refColZ0;
   Vector<T> refColX;
   if(!randominit) {
      // deterministic init: copy samples cyclically into Z0
      // NOTE(review): 'Z0.refCol(i%n, ...)' looks suspicious -- for p > n it
      // rewrites the first columns and leaves later Z0 columns untouched;
      // 'Z0.refCol(i, ...)' was presumably intended.  Kept as written.
      for(int i=0; i<p; i++) {
         X.refCol(i%n, refColX);
         Z0.refCol(i%n, refColZ0);
         refColZ0.copy(refColX);
      }
   } else {
      // random init with a fixed seed (POSIX random(), reproducible runs)
      srandom(0);
      for(int i=0; i<p; i++) {
         int k = random() % n;
         X.refCol(k, refColX);
         Z0.refCol(i, refColZ0);
         refColZ0.copy(refColX);
      }
   }
   archetypalAnalysis(X, Z0, Z, A, B, robust, epsilon2, computeXtX, stepsFISTA, stepsAS,numThreads);
}

template <typename T>
void decompSimplex(const Matrix<T>& X, const Matrix<T>& Z, SpMatrix<T>& alpha,
                   const bool computeZtZ, const int numThreads) {
   init_omp(numThreads);
   const int n = X.n();
   const int p = Z.n();
   Matrix<T> AlphaT(p,n);
   int i;
   if(computeZtZ) {
      // precompute the regularized Gram matrix once, share it across threads
      Matrix<T> G;
      Z.XtX(G);
      T lambda2 = 1e-5;
      G.addDiag(lambda2*lambda2);
#pragma omp parallel for private(i)
      for(i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         activeSetS(Z,refColX, refColAlphaT, G);
      }
      AlphaT.toSparse(alpha);
   } else {
#pragma omp parallel for private(i)
      for(i=0; i<n; ++i) {
         Vector<T> refColX;
         Vector<T> refColAlphaT;
         X.refCol(i,refColX);
         AlphaT.refCol(i, refColAlphaT);
         activeSet(Z,refColX, refColAlphaT);
      }
      AlphaT.toSparse(alpha);
   }
}

#endif
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include "http_stream.h" #include <stdio.h> #include <stdlib.h> #include <string.h> extern int check_mistakes; #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed, int contrastive) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { if (contrastive && (i % 2) == 1) start_time_indexes[i] = start_time_indexes[i - 1]; else start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //printf(" index = %d, ", index); //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", 
sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char **get_random_paths_custom(char **paths, int n, int m, int contrastive) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); int old_index = 0; //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; if (contrastive && (i % 2 == 1)) index = old_index; else old_index = index; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **get_random_paths(char **paths, int n, int m) { return get_random_paths_custom(paths, n, m, 0); } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float 
exposure, int dontuse_opencv, int contrastive) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? w : h; image im; const int img_index = (contrastive) ? (i / 2) : i; if(dontuse_opencv) im = load_image_stb_resize(paths[img_index], 0, 0, 3); else im = load_image_color(paths[img_index], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[img_index]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. 
(This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) { printf("\n Error in read_boxes() \n"); getchar(); } *n = 0; return boxes; } const int max_obj_img = 4000;// 30000; const int img_hash = (custom_hash(filename) % max_obj_img)*max_obj_img; //printf(" img_hash = %d, filename = %s; ", img_hash, filename); float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].track_id = count + img_hash; //printf(" boxes[count].track_id = %d, count = %d \n", boxes[count].track_id, count); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; 
boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = 
x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, int truth_size, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; int track_id = boxes[i].track_id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); if (check_mistakes) getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*truth_size +0] = x; truth[(i-sub)*truth_size +1] = y; truth[(i-sub)*truth_size +2] = w; truth[(i-sub)*truth_size +3] = h; truth[(i-sub)*truth_size +4] = id; truth[(i-sub)*truth_size +5] = track_id; //float val = track_id; //printf(" i = %d, sub = %d, truth_size = %d, track_id = %d, %f, %f\n", i, sub, truth_size, track_id, truth[(i - sub)*truth_size + 5], val); if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = 
/** One-hot encode `truth[k]` from `path`: truth[i] = 1 when labels[i] occurs
 *  as a substring of the path.  Complains (but still proceeds) when the path
 *  matches zero or more than one label. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k * sizeof(float));

    int matches = 0;
    for (int i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            truth[i] = 1;
            ++matches;
        }
    }
    if (matches == 1) return;

    // Ambiguous or unmatched path: list every label that matched.
    printf("Too many or too few labels: %d, %s\n", matches, path);
    int shown = 0;
    for (int i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            printf("\t label %d: %s \n", shown, labels[i]);
            shown++;
        }
    }
}
/** Index of the largest strictly-positive element of arr[0..size-1].
 *  Returns 0 when the array is empty or no element exceeds 0 (the running
 *  maximum starts at 0, matching the original contract - suitable for
 *  probability vectors). */
int find_max(float *arr, int size)
{
    int best_idx = 0;
    float best_val = 0;
    for (int i = 0; i < size; ++i) {
        if (arr[i] <= best_val) continue;
        best_val = arr[i];
        best_idx = i;
    }
    return best_idx;
}
(i / 2) : i; const uintptr_t path_p = (uintptr_t)paths[img_index];// abs(random_gen()); const int class_id = path_p % k; int l; for (l = 0; l < k; ++l) y.vals[i][l] = 0; y.vals[i][class_id] = 1; } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = 
crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char 
**paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*30; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } void blend_truth(float *new_truth, int boxes, int truth_size, float *old_truth) { int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*truth_size]; if (!x) break; count_new_truth++; } for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + t*truth_size; float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size; float x = old_truth_ptr[0]; if (!x) break; new_truth_ptr[0] = old_truth_ptr[0]; new_truth_ptr[1] = old_truth_ptr[1]; new_truth_ptr[2] = old_truth_ptr[2]; new_truth_ptr[3] = old_truth_ptr[3]; new_truth_ptr[4] = old_truth_ptr[4]; } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } void blend_truth_mosaic(float *new_truth, int boxes, int truth_size, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup, int left_shift, int right_shift, int top_shift, int bot_shift, int net_w, int net_h, int mosaic_bound) { const float lowest_w 
= 1.F / net_w; const float lowest_h = 1.F / net_h; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*truth_size]; if (!x) break; count_new_truth++; } int new_t = count_new_truth; for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + new_t*truth_size; new_truth_ptr[0] = 0; float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size; float x = old_truth_ptr[0]; if (!x) break; float xb = old_truth_ptr[0]; float yb = old_truth_ptr[1]; float wb = old_truth_ptr[2]; float hb = old_truth_ptr[3]; // shift 4 images if (i_mixup == 0) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 1) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 2) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } if (i_mixup == 3) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } int left = (xb - wb / 2)*w; int right = (xb + wb / 2)*w; int top = (yb - hb / 2)*h; int bot = (yb + hb / 2)*h; if(mosaic_bound) { // fix out of Mosaic-bound float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0; if (i_mixup == 0) { left_bound = 0; right_bound = cut_x; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 1) { left_bound = cut_x; right_bound = w; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 2) { left_bound = 0; right_bound = cut_x; top_bound = cut_y; bot_bound = h; } if (i_mixup == 3) { left_bound = cut_x; right_bound = w; top_bound = cut_y; bot_bound = h; } if (left < left_bound) { //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound); left = left_bound; } if (right > right_bound) { //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound); right = right_bound; } if (top < top_bound) top = top_bound; if (bot > bot_bound) bot = bot_bound; xb = ((float)(right + left) / 
2) / w; wb = ((float)(right - left)) / w; yb = ((float)(bot + top) / 2) / h; hb = ((float)(bot - top)) / h; } else { // fix out of bound if (left < 0) { float diff = (float)left / w; xb = xb - diff / 2; wb = wb + diff; } if (right > w) { float diff = (float)(right - w) / w; xb = xb - diff / 2; wb = wb - diff; } if (top < 0) { float diff = (float)top / h; yb = yb - diff / 2; hb = hb + diff; } if (bot > h) { float diff = (float)(bot - h) / h; yb = yb - diff / 2; hb = hb - diff; } left = (xb - wb / 2)*w; right = (xb + wb / 2)*w; top = (yb - hb / 2)*h; bot = (yb + hb / 2)*h; } // leave only within the image if(left >= 0 && right <= w && top >= 0 && bot <= h && wb > 0 && wb < 1 && hb > 0 && hb < 1 && xb > 0 && xb < 1 && yb > 0 && yb < 1 && wb > lowest_w && hb > lowest_h) { new_truth_ptr[0] = xb; new_truth_ptr[1] = yb; new_truth_ptr[2] = wb; new_truth_ptr[3] = hb; new_truth_ptr[4] = old_truth_ptr[4]; new_t++; } } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int show_imgs) { const int random_index = random_gen(); c = c ? 
c : 3; if (use_mixup == 2 || use_mixup == 4) { printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n"); if (check_mistakes) getchar(); if(use_mixup == 2) use_mixup = 0; else use_mixup = 3; } if (use_mixup == 3 && letter_box) { //printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); //if (check_mistakes) getchar(); //exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0, gaussian_noise = 0; d.y = make_matrix(n, truth_size*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { printf("\n Error in load_data_detection() - OpenCV \n"); fflush(stdout); if (check_mistakes) { getchar(); } continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if 
(resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; // < 0 int min_rdh = oh*(1 - (1 / resize_down)) / 2; // < 0 if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; // > 0 int max_rdh = oh*(1 - (1 / resize_up)) / 2; // > 0 //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2); if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? random_gen() % 2 : 0; } r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise; else gaussian_noise = 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float scale = rand_precalc_random(.25, 2, r_scale); // unused 
currently //printf(" letter_box = %d \n", letter_box); if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); } // move each 2nd image to the corner - so that most of it was visible if (use_mixup == 3 && random_gen() % 2 == 0) { if (flip) { if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; } else { if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0; } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, 
dy, 1. / sx, 1. / sy, w, h); //for (int z = 0; z < boxes; ++z) if(truth[z*truth_size] > 0) printf(" track_id = %f \n", truth[z*truth_size + 5]); //printf(" truth_size = %d \n", truth_size); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, boxes, truth_size, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth_size, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 
&& y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth_size, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift, w, h, mosaic_bound); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); //assert(use_mixup < 2); if (use_mixup == 2) { printf("\n cutmix=1 - isn't supported for Detector \n"); exit(0); } if (use_mixup == 3 || use_mixup == 4) { printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n"); exit(0); } int mixup = use_mixup ? 
random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, truth_size * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if (resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; int min_rdh = oh*(1 - (1 / resize_down)) / 2; if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; int max_rdh = oh*(1 - (1 / resize_up)) / 2; if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? 
random_gen() % 2 : 0; } r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); 
distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, truth_size, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv, a.contrastive); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.truth_size, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.mosaic_bound, a.contrastive, a.contrastive_jit_flip, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } 
else if (a.type == TAG_DATA){
    /* Tail of load_thread(): dispatch for tag-style multi-label data. */
    *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr);
return 0;
}

/* Run load_thread() once asynchronously on a heap copy of args.
   Ownership of the copy passes to load_thread, which frees it; the caller
   joins via the returned pthread_t. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

/* Shared state for the lazily created pool of permanent loader threads. */
static const int thread_wait_ms = 5;          /* polling interval in ms */
static volatile int flag_exit;                /* set to 1 to stop all workers */
static volatile int * run_load_data = NULL;   /* per-thread "work pending" flags */
static load_args * args_swap = NULL;          /* per-thread work descriptors */
static pthread_t* threads = NULL;             /* the pool itself */
pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER;

/* Worker loop for permanent loader thread i (ptr owns the heap-allocated
   index and is freed on exit). Polls run_load_data[i]; when set, copies
   args_swap[i] under the mutex, runs load_thread() on the copy, then clears
   the flag to signal completion. Exits when flag_exit is raised. */
void *run_thread_loop(void *ptr)
{
    const int i = *(int *)ptr;
    while (!custom_atomic_load_int(&flag_exit)) {
        while (!custom_atomic_load_int(&run_load_data[i])) {
            if (custom_atomic_load_int(&flag_exit)) {
                free(ptr);
                return 0;
            }
            this_thread_sleep_for(thread_wait_ms);
        }
        pthread_mutex_lock(&mtx_load_data);
        load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args));
        *args_local = args_swap[i];
        pthread_mutex_unlock(&mtx_load_data);
        load_thread(args_local);
        custom_atomic_store_int(&run_load_data[i], 0);
    }
    free(ptr);
    return 0;
}

/* Split args.n across the permanent thread pool, run each worker once, and
   concatenate the per-thread buffers into *args.d. The pool is created
   lazily on the first call and reused afterwards. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));
    if (!threads) {
        /* First call: create the permanent worker threads and shared flags. */
        threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
        run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int));
        args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args));
        fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads);
        for (i = 0; i < args.threads; ++i) {
            int* ptr = (int*)xcalloc(1, sizeof(int));
            *ptr = i;
            if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed");
        }
    }
    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        /* Even split of `total` items over the workers. */
        args.n = (i + 1) * total / args.threads - i * total / args.threads;
        pthread_mutex_lock(&mtx_load_data);
        args_swap[i] = args;
        pthread_mutex_unlock(&mtx_load_data);
        custom_atomic_store_int(&run_load_data[i], 1); // run thread
    }
    for (i = 0; i < args.threads; ++i) {
        /* Busy-wait (with sleep) until worker i clears its flag. */
        while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms); // join
    }
    /*
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
    args.d = buffers + i;
    args.n = (i+1) * total/args.threads - i * total/args.threads;
    threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
    pthread_join(threads[i], 0);
    }
    */
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    //free(threads);
    return 0;
}

/* Signal all permanent loader threads to exit, join them, and release the
   pool's shared state so a later call can recreate it. */
void free_load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    int i;
    if (threads) {
        custom_atomic_store_int(&flag_exit, 1);
        for (i = 0; i < args.threads; ++i) {
            pthread_join(threads[i], 0);
        }
        free((void*)run_load_data);
        free(args_swap);
        free(threads);
        threads = NULL;
        custom_atomic_store_int(&flag_exit, 0);
    }
}

/* Kick off load_threads() asynchronously on a heap copy of args; the copy is
   freed inside load_threads. Caller joins via the returned pthread_t. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* Image-to-image training pairs: X from the listed .png images, y from the
   matching "-label.png" files loaded as grayscale at out_w x out_h. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification data without augmentation; body continues on the
   next source line. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m)
paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv, int contrastive) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps, contrastive); if (use_mixup && 
rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; assert(cut_x >= 0 && cut_x <= w); assert(cut_y 
>= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 4 == 0) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) ksize = 15; image blurred = blur_image(im, 
ksize); free_image(im); d.X.vals[i] = blurred.data; //if (i == 0) { // show_image(im, "Not blurred"); // show_image(blurred, "blurred"); // wait_until_press_key_cv(); //} } } } #endif // OPENCV if (show_imgs) { int i, j; for (i = 0; i < d.X.rows; ++i) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; char buff[1000]; sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen()); save_image(im, buff); char buff_string[1000]; sprintf(buff_string, "\n Classes: "); for (j = 0; j < d.y.cols; ++j) { if (d.y.vals[i][j] > 0) { char buff_tmp[100]; sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]); strcat(buff_string, buff_tmp); } } printf("%s \n", buff_string); if (show_imgs == 1) { show_image(im, buff); wait_until_press_key_cv(); } } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n"); } if (m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data 
load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
DRB059-lastprivate-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISO/IEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Using lastprivate() to resolve an output dependence.
Semantics of lastprivate (x): causes the corresponding original list item to be updated after the end of the region. The compilerruntime copies the local value back to the shared one within the last iteration. */ #include <stdio.h> void foo() { int i, x; #pragma cetus private(i) #pragma cetus lastprivate(x) #pragma loop name foo#0 #pragma cetus parallel #pragma omp parallel for private(i) lastprivate(x) for (i=0; i<100; i ++ ) { x=i; } printf("x=%d", x); return ; } int main() { int _ret_val_0; foo(); _ret_val_0=0; return _ret_val_0; }
feature_group.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for
 * license information.
 */
#ifndef LIGHTGBM_FEATURE_GROUP_H_
#define LIGHTGBM_FEATURE_GROUP_H_

#include <LightGBM/bin.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/random.h>

#include <cstdio>
#include <memory>
#include <vector>

namespace LightGBM {

class Dataset;
class DatasetLoader;
struct TrainingShareStates;
class MultiValBinWrapper;

/*! \brief Using to store data and providing some operations on one feature
 * group.
 *
 * A feature group bundles one or more features whose bins are packed into a
 * single Bin container (or, in multi-value mode, one Bin per feature).
 * bin_offsets_[i] gives the first global bin index of sub-feature i inside
 * the shared bin space; num_total_bin_ is the total size of that space. */
class FeatureGroup {
 public:
  friend Dataset;
  friend DatasetLoader;
  friend TrainingShareStates;
  friend MultiValBinWrapper;
  /*!
   * \brief Constructor
   * \param num_feature number of features of this group
   * \param is_multi_val >0 if this group stores one Bin per feature (multi-value mode)
   * \param bin_mappers Bin mapper for features (ownership is taken via release())
   * \param num_data Total number of data
   * \param group_id Index of this group; group 0 gets special most-freq-bin handling
   */
  FeatureGroup(int num_feature, int8_t is_multi_val,
               std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data, int group_id)
      : num_feature_(num_feature), is_multi_val_(is_multi_val > 0), is_sparse_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature);
    auto& ref_bin_mappers = *bin_mappers;
    double sum_sparse_rate = 0.0f;
    // Take ownership of the mappers and accumulate average sparsity.
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      sum_sparse_rate += bin_mappers_.back()->sparse_rate();
    }
    sum_sparse_rate /= num_feature_;
    int offset = 1;
    is_dense_multi_val_ = false;
    if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold && is_multi_val_) {
      // use dense multi val bin
      offset = 0;
      is_dense_multi_val_ = true;
    }
    // use bin at zero to store most_freq_bin only when not using dense multi val bin
    num_total_bin_ = offset;
    // however, we should force to leave one bin, if dense multi val bin is the first bin
    // and its first feature has most freq bin > 0
    if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ &&
        bin_mappers_[0]->GetMostFreqBin() > 0) {
      num_total_bin_ = 1;
    }
    // Build the running prefix of bin counts: bin_offsets_[i] = first global
    // bin of sub-feature i. A feature whose most-freq bin is 0 shares the
    // reserved zero bin, so it contributes `offset` fewer bins.
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    CreateBinData(num_data, is_multi_val_, true, false);
  }

  /*!
   * \brief Copy-like constructor that deep-copies the bin mappers but
   *        allocates fresh (empty) bin data sized for num_data rows.
   */
  FeatureGroup(const FeatureGroup& other, int num_data) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_dense_multi_val_ = other.is_dense_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_);
  }

  /*!
   * \brief Constructor for a single-feature, non-multi-value group.
   * \param bin_mappers Must contain exactly one mapper (ownership is taken)
   * \param num_data Total number of data
   */
  FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data)
      : num_feature_(1), is_multi_val_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), 1);
    // use bin at zero to store default_bin
    num_total_bin_ = 1;
    is_dense_multi_val_ = false;
    bin_offsets_.emplace_back(num_total_bin_);
    auto& ref_bin_mappers = *bin_mappers;
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    CreateBinData(num_data, false, false, false);
  }

  /*!
   * \brief Constructor from memory
   * \param memory Pointer of memory
   * \param num_all_data Number of global data
   * \param local_used_indices Local used indices, empty means using all data
   * \param group_id Index of this group (group 0 gets special most-freq-bin handling)
   */
  FeatureGroup(const void* memory, data_size_t num_all_data,
               const std::vector<data_size_t>& local_used_indices, int group_id) {
    const char* memory_ptr = reinterpret_cast<const char*>(memory);
    // Header layout mirrors SaveBinaryToFile: three aligned bools then the
    // feature count.
    is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_multi_val_));
    is_dense_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_));
    is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_sparse_));
    num_feature_ = *(reinterpret_cast<const int*>(memory_ptr));
    memory_ptr += VirtualFileWriter::AlignedSize(sizeof(num_feature_));
    // get bin mapper
    bin_mappers_.clear();
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(new BinMapper(memory_ptr));
      memory_ptr += bin_mappers_[i]->SizesInByte();
    }
    // Recompute bin offsets exactly as the in-memory constructor does.
    bin_offsets_.clear();
    int offset = 1;
    if (is_dense_multi_val_) {
      offset = 0;
    }
    // use bin at zero to store most_freq_bin only when not using dense multi val bin
    num_total_bin_ = offset;
    // however, we should force to leave one bin, if dense multi val bin is the first bin
    // and its first feature has most freq bin > 0
    if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ &&
        bin_mappers_[0]->GetMostFreqBin() > 0) {
      num_total_bin_ = 1;
    }
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    data_size_t num_data = num_all_data;
    if (!local_used_indices.empty()) {
      num_data = static_cast<data_size_t>(local_used_indices.size());
    }
    if (is_multi_val_) {
      // One Bin per sub-feature; `addi` reserves an extra bin unless the
      // most-freq bin already aliases bin 0.
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
        multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices);
        memory_ptr += multi_bin_data_.back()->SizesInByte();
      }
    } else {
      if (is_sparse_) {
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      // get bin data
      bin_data_->LoadFromMemory(memory_ptr, local_used_indices);
    }
  }

  /*! \brief Destructor */
  ~FeatureGroup() {}

  /*!
   * \brief Push one record, will auto convert to bin and push to bin data
   * \param tid Thread id
   * \param sub_feature_idx Index of the feature inside this group
   * \param line_idx Index of record
   * \param value feature value of record
   */
  inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx,
                       double value) {
    uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value);
    // Most-frequent bin is stored implicitly; skip the write entirely.
    if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) {
      return;
    }
    if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) {
      bin -= 1;
    }
    if (is_multi_val_) {
      multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1);
    } else {
      // Shift into this sub-feature's slice of the shared bin space.
      bin += bin_offsets_[sub_feature_idx];
      bin_data_->Push(tid, line_idx, bin);
    }
  }

  /*! \brief Resize the underlying bin storage to num_data rows. */
  void ReSize(int num_data) {
    if (!is_multi_val_) {
      bin_data_->ReSize(num_data);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->ReSize(num_data);
      }
    }
  }

  /*! \brief Copy the rows selected by used_indices from full_feature into
   *         this group's bin data. */
  inline void CopySubrow(const FeatureGroup* full_feature,
                         const data_size_t* used_indices,
                         data_size_t num_used_indices) {
    if (!is_multi_val_) {
      bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices,
                            num_used_indices);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(),
                                       used_indices, num_used_indices);
      }
    }
  }

  /*! \brief Like CopySubrow, but in multi-value mode copies only the single
   *         sub-feature fidx (non-multi-value groups copy the whole Bin). */
  inline void CopySubrowByCol(const FeatureGroup* full_feature,
                              const data_size_t* used_indices,
                              data_size_t num_used_indices, int fidx) {
    if (!is_multi_val_) {
      bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices,
                            num_used_indices);
    } else {
      multi_bin_data_[fidx]->CopySubrow(full_feature->multi_bin_data_[fidx].get(),
                                        used_indices, num_used_indices);
    }
  }

  /*! \brief Append all features of another multi-value group to this one,
   *         recomputing dense/sparse mode and all bin offsets. Both groups
   *         must be multi-value. */
  void AddFeaturesFrom(const FeatureGroup* other, int group_id) {
    CHECK(is_multi_val_);
    CHECK(other->is_multi_val_);
    // every time when new features are added, we need to reconsider sparse or dense
    double sum_sparse_rate = 0.0f;
    for (int i = 0; i < num_feature_; ++i) {
      sum_sparse_rate += bin_mappers_[i]->sparse_rate();
    }
    for (int i = 0; i < other->num_feature_; ++i) {
      sum_sparse_rate += other->bin_mappers_[i]->sparse_rate();
    }
    sum_sparse_rate /= (num_feature_ + other->num_feature_);
    int offset = 1;
    is_dense_multi_val_ = false;
    if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold && is_multi_val_) {
      // use dense multi val bin
      offset = 0;
      is_dense_multi_val_ = true;
    }
    // Rebuild offsets from scratch under the (possibly changed) offset mode.
    bin_offsets_.clear();
    num_total_bin_ = offset;
    // however, we should force to leave one bin, if dense multi val bin is the first bin
    // and its first feature has most freq bin > 0
    if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ &&
        bin_mappers_[0]->GetMostFreqBin() > 0) {
      num_total_bin_ = 1;
    }
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    // Deep-copy the other group's mappers and clone its bin data.
    for (int i = 0; i < other->num_feature_; ++i) {
      const auto& other_bin_mapper = other->bin_mappers_[i];
      bin_mappers_.emplace_back(new BinMapper(*other_bin_mapper));
      auto num_bin = other_bin_mapper->num_bin();
      if (other_bin_mapper->GetMostFreqBin() == 0) {
        num_bin -= offset;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
      multi_bin_data_.emplace_back(other->multi_bin_data_[i]->Clone());
    }
    num_feature_ += other->num_feature_;
  }

  /*! \brief Create an iterator over one sub-feature's bins.
   *  Caller owns the returned iterator. */
  inline BinIterator* SubFeatureIterator(int sub_feature) {
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin,
                                                       most_freq_bin);
    }
  }

  /*! \brief Finalize bin data after all PushData calls; parallel per feature
   *         in multi-value mode. */
  inline void FinishLoad() {
    if (is_multi_val_) {
      OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
      for (int i = 0; i < num_feature_; ++i) {
        OMP_LOOP_EX_BEGIN();
        multi_bin_data_[i]->FinishLoad();
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    } else {
      bin_data_->FinishLoad();
    }
  }

  /*! \brief Iterator over the whole group's bin range; nullptr in
   *         multi-value mode (no single shared Bin exists). */
  inline BinIterator* FeatureGroupIterator() {
    if (is_multi_val_) {
      return nullptr;
    }
    uint32_t min_bin = bin_offsets_[0];
    uint32_t max_bin = bin_offsets_.back() - 1;
    uint32_t most_freq_bin = 0;
    return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
  }

  /*! \brief Byte size of the shared bin data.
   *  NOTE(review): unlike FeatureGroupData(), this does not guard against
   *  multi-value mode, where bin_data_ is null — callers appear responsible
   *  for checking first. */
  inline size_t FeatureGroupSizesInByte() { return bin_data_->SizesInByte(); }

  /*! \brief Raw pointer to the shared bin data; nullptr in multi-value mode. */
  inline void* FeatureGroupData() {
    if (is_multi_val_) {
      return nullptr;
    }
    return bin_data_->get_data();
  }

  /*!
   * \brief Partition data_indices by a split on one sub-feature.
   * \param sub_feature Index of the feature inside this group
   * \param threshold Split threshold(s); an array for categorical splits
   * \param num_threshold Number of thresholds (categorical only)
   * \param default_left Whether missing values go to the left child
   * \param data_indices Indices to partition
   * \param cnt Number of indices
   * \param lte_indices Output: indices going to the <= (left) side
   * \param gt_indices Output: indices going to the > (right) side
   * \return Number of indices written to lte_indices
   */
  inline data_size_t Split(int sub_feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           const data_size_t* data_indices, data_size_t cnt,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin();
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        // Single-feature groups use the overload without min_bin.
        if (num_feature_ == 1) {
          return bin_data_->Split(max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        } else {
          return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        }
      } else {
        if (num_feature_ == 1) {
          return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold,
                                             num_threshold, data_indices, cnt,
                                             lte_indices, gt_indices);
        } else {
          return bin_data_->SplitCategorical(
              min_bin, max_bin, most_freq_bin, threshold, num_threshold,
              data_indices, cnt, lte_indices, gt_indices);
        }
      }
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return multi_bin_data_[sub_feature]->Split(
            max_bin, default_bin, most_freq_bin, missing_type, default_left,
            *threshold, data_indices, cnt, lte_indices, gt_indices);
      } else {
        return multi_bin_data_[sub_feature]->SplitCategorical(
            max_bin, most_freq_bin, threshold, num_threshold, data_indices,
            cnt, lte_indices, gt_indices);
      }
    }
  }

  /*!
   * \brief From bin to feature value
   * \param sub_feature_idx Index of the feature inside this group
   * \param bin Bin index to convert
   * \return Feature value represented by this bin
   */
  inline double BinToValue(int sub_feature_idx, uint32_t bin) const {
    return bin_mappers_[sub_feature_idx]->BinToValue(bin);
  }

  /*!
   * \brief Save binary data to file
   * \param writer File want to write (layout must match the from-memory constructor)
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const {
    writer->AlignedWrite(&is_multi_val_, sizeof(is_multi_val_));
    writer->AlignedWrite(&is_dense_multi_val_, sizeof(is_dense_multi_val_));
    writer->AlignedWrite(&is_sparse_, sizeof(is_sparse_));
    writer->AlignedWrite(&num_feature_, sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_[i]->SaveBinaryToFile(writer);
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->SaveBinaryToFile(writer);
      }
    } else {
      bin_data_->SaveBinaryToFile(writer);
    }
  }

  /*!
   * \brief Get sizes in byte of this object (must stay consistent with
   *        SaveBinaryToFile's output)
   */
  size_t SizesInByte() const {
    size_t ret = VirtualFileWriter::AlignedSize(sizeof(is_multi_val_)) +
                 VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_)) +
                 VirtualFileWriter::AlignedSize(sizeof(is_sparse_)) +
                 VirtualFileWriter::AlignedSize(sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      ret += bin_mappers_[i]->SizesInByte();
    }
    if (!is_multi_val_) {
      ret += bin_data_->SizesInByte();
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        ret += multi_bin_data_[i]->SizesInByte();
      }
    }
    return ret;
  }

  /*! \brief Disable copy */
  FeatureGroup& operator=(const FeatureGroup&) = delete;

  /*! \brief Deep copy, including cloned bin data. When
   *  should_handle_dense_mv is set and this dense multi-value group is no
   *  longer group 0, the reserved empty zero bin is removed. */
  FeatureGroup(const FeatureGroup& other, bool should_handle_dense_mv, int group_id) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_dense_multi_val_ = other.is_dense_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    if (!is_multi_val_) {
      bin_data_.reset(other.bin_data_->Clone());
    } else {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone());
      }
    }
    if (should_handle_dense_mv && is_dense_multi_val_ && group_id > 0) {
      // this feature group was the first feature group, but now no longer is,
      // so we need to eliminate its special empty bin for multi val dense bin
      if (bin_mappers_[0]->GetMostFreqBin() > 0 && bin_offsets_[0] == 1) {
        for (size_t i = 0; i < bin_offsets_.size(); ++i) {
          bin_offsets_[i] -= 1;
        }
        num_total_bin_ -= 1;
      }
    }
  }

 private:
  /*! \brief Allocate the Bin container(s) for num_data rows.
   *  In multi-value mode each feature gets its own (sparse or dense) Bin;
   *  otherwise a single shared Bin is created, sparse only when forced or
   *  when a lone feature is sparse enough and density is not forced. */
  void CreateBinData(int num_data, bool is_multi_val, bool force_dense,
                     bool force_sparse) {
    if (is_multi_val) {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
      }
      is_multi_val_ = true;
    } else {
      if (force_sparse ||
          (!force_dense && num_feature_ == 1 &&
           bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) {
        is_sparse_ = true;
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        is_sparse_ = false;
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      is_multi_val_ = false;
    }
  }

  /*! \brief Number of features */
  int num_feature_;
  /*! \brief Bin mapper for sub features */
  std::vector<std::unique_ptr<BinMapper>> bin_mappers_;
  /*! \brief Bin offsets for sub features */
  std::vector<uint32_t> bin_offsets_;
  /*! \brief Bin data of this feature */
  std::unique_ptr<Bin> bin_data_;
  /*! \brief Per-feature bin data, used only in multi-value mode */
  std::vector<std::unique_ptr<Bin>> multi_bin_data_;
  /*! \brief True if this group stores one Bin per feature (multi-value mode) */
  bool is_multi_val_;
  /*! \brief True if multi-value storage uses the dense layout */
  bool is_dense_multi_val_;
  /*! \brief True if the shared bin data is sparse */
  bool is_sparse_;
  /*! \brief Total number of bins across all sub features */
  int num_total_bin_;
};

}  // namespace LightGBM

#endif  // LIGHTGBM_FEATURE_GROUP_H_
second_omp_exemple.c
// second_omp_exemple.c
// compile with: /openmp
/* #############################################################################
## DESCRIPTION: Simple exemple to read a name and show your Threads - OpenMp.
## NAME: second_omp_exemple.c
## AUTHOR: Lucca Pessoa da Silva Matos
## DATE: 10.04.2020
## VERSION: 1.0
## EXEMPLE:
## PS C:\> gcc -fopenmp -o second_omp_exemple second_omp_exemple.c
##############################################################################*/

// =============================================================================
// LIBRARYS
// =============================================================================
#include <omp.h>
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>

// =============================================================================
// MACROS
// =============================================================================
#define NAME_SIZE 256

// =============================================================================
// CALL FUNCTIONS
// =============================================================================
void cabecalho();
void set_portuguese();

// =============================================================================
// MAIN
// =============================================================================
// Reads the user's name from stdin, then has every OpenMP thread greet the
// user with its own id and the team size.
int main(int argc, char const *argv[]){
  set_portuguese();
  cabecalho();

  // Alocando.
  char *name = malloc(NAME_SIZE);

  // Verificando.
  if (name == NULL){
    printf("Sorry... We dont have memory :(\n");
    return 1;
  }

  printf("\nHey coder! What's your name? ");
  // FIX: the original format "%[^\n]s" had a stray trailing 's' (a literal
  // match, never satisfied) and no field width, allowing a buffer overflow.
  // "%255[^\n]" bounds the read to NAME_SIZE-1 chars plus the terminator,
  // and the return value is checked so `name` is never read uninitialized.
  if (scanf("%255[^\n]", name) != 1){
    name[0] = '\0';
  }

  printf("\nHello %s. Nice to meet you.\n", name);

  printf("\n1 - We are out of the parallel context.\n\n");

  // Fork
  // FIX: thread_id/num_threads were shared variables written by every
  // thread concurrently (data race). Declaring them inside the parallel
  // region makes them private to each thread.
  #pragma omp parallel
  {
    int thread_id = omp_get_thread_num();
    int num_threads = omp_get_num_threads();
    printf("Hey %s! I'm Thread %d - Total %d!\n", name, thread_id, num_threads);
  }
  // Join

  printf("\n2 - We are out of the parallel context.\n\n");

  free(name);  // FIX: original leaked the allocation

  return 0;
}

// =============================================================================
// FUNCTIONS
// =============================================================================
// Switches the C locale so accented output renders correctly on pt-BR consoles.
void set_portuguese(){
  setlocale(LC_ALL, "Portuguese");
}

// Prints the program banner.
void cabecalho(){
  printf("\n**************************************************");
  printf("\n* *");
  printf("\n* *");
  printf("\n* PROGRAMACAO PARALELA COM OPENMP - LUCCA PESSOA *");
  printf("\n* *");
  printf("\n* *");
  printf("\n**************************************************\n");
}
nodal_two_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< 
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node<3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ NodalTwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { InitializeStrategy(rSolverConfig); } NodalTwoStepVPStrategy(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input? 
mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); /* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */ pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new 
ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. virtual ~NodalTwoStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", ""); if (BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. 
Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize()); if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize()); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } bool SolveSolutionStep() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; bool converged = false; // bool momentumAlreadyConverged=false; // bool continuityAlreadyConverged=false; unsigned int maxNonLinearIterations = mMaxPressureIter; std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl; if (timeIntervalChanged == true && currentTime > 10 * timeInterval) { maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if 
(currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; double pressureNorm = 0; double velocityNorm = 0; /* boost::timer solve_step_time; */ this->InitializeSolutionStep(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; if (it == 0) { this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); } this->CalcNodalStrainsAndStresses(); momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); this->CalcNodalStrains(); if (fixedTimeStep == false) { continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm); } // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // momentumAlreadyConverged=true; // } // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // continuityAlreadyConverged=true; // } if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1)) { //this->ComputeErrorL2NormCaseImposedG(); //this->ComputeErrorL2NormCasePoiseuille(); this->CalculateAccelerations(); // std::ofstream myfile; 
// myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } if ((continuityConverged && momentumConverged) && it > 1) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); converged = true; std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return converged; } /* End of the solution step: the stress/strain update call is intentionally disabled here. */ void FinalizeSolutionStep() override { /* this->UpdateStressStrain(); */ } /* One-off initialization: for every node, size and zero the nodal history containers (Cauchy and deviatoric stress vectors, SFD neighbour-weight vector, spatial deformation-rate vector, deformation-gradient matrices) and reset the scalar nodal quantities (volume, mean mesh size, free-surface area), reporting to stdout any node missing a variable; finally assigns the per-node fluid material coefficients. NOTE(review): sizeSDFNeigh here is neighb_nodes.size()*dimension, while InitializeNodalVariablesForRemeshedDomain uses (size()+1)*dimension -- TODO confirm which is intended. */ void Initialize() override { std::cout << " Initialize in nodal_two_step_v_p_strategy" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size(); unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_CAUCHY_STRESS...
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) { rNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) { rSpatialDefRate.resize(sizeStrains, false); } noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) { rFgrad.resize(dimension, dimension, false); } noalias(rFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) { rFgradVel.resize(dimension, dimension, false); } noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl; } this->AssignFluidMaterialToEachNode(itNode); } // } } /* Flags as inactive (ACTIVE=false) every simplex element whose measure (area in 2D, volume in 3D) is below 0.001 x the mean elemental volume of the model part, so slivers are skipped during assembly; all other simplices are set ACTIVE=true. NOTE(review): ElementalVolume is declared outside the omp parallel region but written inside the element loop by every thread -- data race; it should be declared private to the parallel block. TODO confirm. */ void UnactiveSliverElements() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); MesherUtilities MesherUtils; double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart); double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size()); double ElementalVolume = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); if (numNodes == (dimension + 1)) { if (dimension == 2) { ElementalVolume = (itElem)->GetGeometry().Area(); } else if (dimension == 3) { ElementalVolume = (itElem)->GetGeometry().Volume(); } if (ElementalVolume < CriticalVolume) { // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " <<
CriticalVolume<< std::endl; (itElem)->Set(ACTIVE, false); } else { (itElem)->Set(ACTIVE, true); } } } } KRATOS_CATCH(""); } /* Stores the per-node material coefficients used by the nodal V-P scheme: DEVIATORIC_COEFFICIENT = dynamic viscosity, and VOLUMETRIC_COEFFICIENT = dt*BULK_MODULUS - (2/3)*viscosity, i.e. a first-Lame-like coefficient with the bulk part scaled by the time step. */ void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff; } /* Accumulates NODAL_VOLUME by lumping each element's measure equally onto its nodes: area/3 per node in 2D, volume/4 per node in 3D. NOTE(review): the omp parallel block is commented out while the thread-partition indexing (ThisThread()) is kept, so with OpenMP enabled only the first of omp_get_max_threads() partitions is visited -- elements outside partition 0 appear to be skipped. TODO confirm. */ void ComputeNodalVolume() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume +=
 elementalVolume; } } // } } /* Per-step setup: rebuild the nodal shape-function-derivative (SFD) bookkeeping before the nonlinear loop. */ void InitializeSolutionStep() override { this->FillNodalSFDVector(); } /* For every node: reset the nodal containers (as after a remesh) and record the ordered neighbour-node ids into NODAL_SFD_NEIGHBOURS_ORDER. */ void FillNodalSFDVector() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // { for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { InitializeNodalVariablesForRemeshedDomain(itNode); SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER } } /* Writes [own id, neighbour ids...] into NODAL_SFD_NEIGHBOURS_ORDER; the vector size is neighbour count + 1 because the node itself is stored first. */ void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because the node itself must also be considered as a neighbour node Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodeOrderedNeighbours.size() != neighbourNodes) rNodeOrderedNeighbours.resize(neighbourNodes, false); noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes); rNodeOrderedNeighbours[0] = itNode->Id(); if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id(); } } } /* Resets (and resizes if needed) all nodal history containers for a node that has just (re)appeared after remeshing; vector sizes depend on the space dimension and on neighbour count + 1 (the node itself). */ void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress =
itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) rNodalStress.resize(sizeStrains, false); noalias(rNodalStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalDevStress.size() != sizeStrains) rNodalDevStress.resize(sizeStrains, false); noalias(rNodalDevStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER)) { Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodalSFDneighboursOrder.size() != neighbourNodes) rNodalSFDneighboursOrder.resize(neighbourNodes, false); noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes); } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) rNodalSFDneighbours.resize(sizeSDFNeigh, false); noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) rSpatialDefRate.resize(sizeStrains, false); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) rFgrad.resize(dimension, dimension, false); noalias(rFgrad) = ZeroMatrix(dimension, dimension); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) rFgradVel.resize(dimension, dimension, false); noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } if 
(itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE)) { itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE)) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; } } /* Calls InitializeNonLinearIteration on the elements of the current thread partition. NOTE(review): as in ComputeNodalVolume, the omp parallel block is commented out while ThisThread()-based partitioning is kept -- with OpenMP enabled only partition 0 is visited. TODO confirm. */ void InitializeNonLinearIterations() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { itElem->InitializeNonLinearIteration(rCurrentProcessInfo); } // } } /* For each node with positive NODAL_VOLUME: build the nodal deformation gradients with theta = 0.5 (midpoint velocity blend) and compute nodal strains plus Cauchy/deviatoric stresses; zero-volume nodes are re-initialized as remeshed. */ void CalcNodalStrainsAndStresses() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double theta = 0.5; if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode,
theta); this->CalcNodalStrainsAndStressesForNode(itNode); } else { // if nodalVolume==0 InitializeNodalVariablesForRemeshedDomain(itNode); } } // } } /* Computes the nodal spatial velocity gradient L = Fdot * F^-1 from the stored nodal deformation gradients, fills the strain-rate vector NODAL_SPATIAL_DEF_RATE, and, when YIELD_SHEAR > 0, applies the Papanastasiou-type regularization to the deviatoric coefficient (mu += tau_y/gamma_dot * (1 - exp(-m*gamma_dot)), with the small-rate limit m*tau_y); finally stores the total and deviatoric nodal Cauchy stresses. */ void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); double detFgrad = 1.0; Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); if (dimension == 2) { MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { auto &r_stain_tensor2D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0); r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1); r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2])); double adaptiveExponent =
itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; auto &r_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0); r_stress_tensor2D[0] = nodalSigmaTot_xx; r_stress_tensor2D[1] = nodalSigmaTot_yy; r_stress_tensor2D[2] = nodalSigmaTot_xy; auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0); r_dev_stress_tensor2D[0] = nodalSigmaDev_xx; r_dev_stress_tensor2D[1] = nodalSigmaDev_yy; r_dev_stress_tensor2D[2] = nodalSigmaDev_xy; } else if (dimension == 3) { auto &r_stain_tensor3D = 
itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0); r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1); r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2); r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]); double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = 
itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; auto &r_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0); r_stress_tensor3D[0] = nodalSigmaTot_xx; r_stress_tensor3D[1] = nodalSigmaTot_yy; r_stress_tensor3D[2] = nodalSigmaTot_zz; r_stress_tensor3D[3] = nodalSigmaTot_xy; r_stress_tensor3D[4] = nodalSigmaTot_xz; r_stress_tensor3D[5] = nodalSigmaTot_yz; auto 
&r_dev_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0); r_dev_stress_tensor3D[0] = nodalSigmaDev_xx; r_dev_stress_tensor3D[1] = nodalSigmaDev_yy; r_dev_stress_tensor3D[2] = nodalSigmaDev_zz; r_dev_stress_tensor3D[3] = nodalSigmaDev_xy; r_dev_stress_tensor3D[4] = nodalSigmaDev_xz; r_dev_stress_tensor3D[5] = nodalSigmaDev_yz; } } /* Strain-only variant of CalcNodalStrainsAndStressesForNode: computes L = Fdot * F^-1 from the stored nodal deformation gradients and fills NODAL_SPATIAL_DEF_RATE, NODAL_EQUIVALENT_STRAIN_RATE and NODAL_VOLUMETRIC_DEF_RATE; nodal stresses are not updated here. */ void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode) { /* std::cout << "Calc Nodal Strains " << std::endl; */ ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); // double detFgrad=1.0; // Matrix InvFgrad=ZeroMatrix(dimension,dimension); // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension); double detFgrad = 1.0; Matrix nodalFgrad = ZeroMatrix(dimension, dimension); Matrix FgradVel = ZeroMatrix(dimension, dimension); Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); //Inverse if (dimension == 2) { MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2])); double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double DefVol = DefX + DefY; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } else if (dimension == 3) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * 
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]); double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double DefZ = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double DefVol = DefX + DefY + DefZ; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } } /* Loops all nodes: for positive NODAL_VOLUME it rebuilds the nodal deformation gradients with theta = 1.0 (end-of-step velocities) and computes the nodal strain rates; zero-volume nodes are re-initialized as remeshed. */ void CalcNodalStrains() { /* std::cout << "Calc Nodal Strains " << std::endl; */ ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double theta = 1.0; if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode, theta); this->CalcNodalStrainsForNode(itNode); } else { // if nodalVolume==0 InitializeNodalVariablesForRemeshedDomain(itNode); } } // } /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */ } /* Assembles the nodal deformation gradient F and its rate Fdot from the stored shape-function derivatives (NODAL_SFD_NEIGHBOURS) of the node and its ordered neighbours; velocities are blended as theta*v(n+1) + (1-theta)*v(n). Results are stored in NODAL_DEFORMATION_GRAD and NODAL_DEFORMATION_GRAD_VEL. */ void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta) { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); /* unsigned int idThisNode=nodalSFDneighboursId[0]; */ const unsigned int neighSize = nodalSFDneighboursId.size(); Matrix Fgrad = ZeroMatrix(dimension, dimension); Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); if (dimension == 2) { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; unsigned int firstRow = 2; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; unsigned int neigh_nodes_id = neighb_nodes[i].Id(); unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1]; if (neigh_nodes_id != other_neigh_nodes_id) { std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl; } Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * 
VelocityY; firstRow += 2; } } } else { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; double dNdZi = rNodalSFDneigh[2]; double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(0, 2) += dNdZi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); Fgrad(1, 2) += dNdZi * itNode->Y(); Fgrad(2, 0) += dNdXi * itNode->Z(); Fgrad(2, 1) += dNdYi * itNode->Z(); Fgrad(2, 2) += dNdZi * itNode->Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; unsigned int firstRow = 3; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; dNdZi = rNodalSFDneigh[firstRow + 2]; VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(0, 2) += dNdZi * 
neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y(); Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z(); Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z(); Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; firstRow += 3; } } } itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad; itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel; KRATOS_CATCH(""); } /* Moves the mesh using the computed displacements (resetting nodal variables in the same pass) and recomputes the weighted boundary normals. */ void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel) { KRATOS_TRY; /* this->CalculateDisplacements(); */ this->CalculateDisplacementsAndResetNodalVariables(); BaseType::MoveMesh(); BoundaryNormalsCalculationUtilities BoundaryComputation; BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); KRATOS_CATCH(""); } /* First-order pressure rate: PRESSURE_VELOCITY = (p(n+1) - p(n)) / dt; both history slots are zeroed on the first time step. */ void CalculatePressureVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; } else { double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; } } } /* Pressure acceleration by finite differencing the pressure velocity; zeroed on the first time step. */ void CalculatePressureAcceleration() {
ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval; } } } void CalculateAccelerations() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs); } else if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0; (i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0; 
(i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; (i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0; (i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } } } inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity, Vector &BDFcoeffs) { /* noalias(PreviousAcceleration)=CurrentAcceleration; */ noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration; // std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t) // std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t) // std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t) } void CalculateDisplacements() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = 
(i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); /* if( i->IsFixed(DISPLACEMENT_X) == false ) */ CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */ CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */ CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; } } void CalculateDisplacementsAndResetNodalVariables() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; if (dimension == 3) { CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; } ///// reset Nodal variables ////// Vector 
&rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); unsigned int sizeSDFNeigh = rNodalSFDneighbours.size(); // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1; // unsigned int sizeSDFNeigh=neighbourNodes*dimension; i->FastGetSolutionStepValue(NODAL_VOLUME) = 0; i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); noalias(rFgrad) = ZeroMatrix(dimension, dimension); Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } // } } void UpdatePressureAccelerations() { this->CalculateAccelerations(); this->CalculatePressureVelocity(); this->CalculatePressureAcceleration(); } void Clear() override { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "NodalTwoStepVPStrategy"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream &rOStream) const override { rOStream << "NodalTwoStepVPStrategy"; } /// Print object's data. 
/// Print object's data (intentionally empty).
void PrintData(std::ostream &rOStream) const override
{
}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/// Calculate the coefficients for time iteration.
/**
 * Fills BDF_COEFFICIENTS in the ProcessInfo for a first- or second-order
 * backward-differentiation scheme, accounting for a variable time step
 * (ratio Rho = OldDt / Dt) in the second-order case.
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
 */
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;

    if (mTimeOrder == 2)
    {
        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);

        BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
    }

    KRATOS_CATCH("");
}

/// Solve one iteration of the momentum (fractional-step velocity) equation.
/**
 * Sets FRACTIONAL_STEP = 1, solves the momentum strategy and reports the
 * relative velocity error NormDv / velocityNorm. On the last iteration it may
 * flag the time step via FixTimeStepMomentum; after `iterationForCheck`
 * iterations it monitors divergence via CheckMomentumConvergence.
 * @param it            current nonlinear iteration index (0-based)
 * @param maxIt         total number of nonlinear iterations
 * @param fixedTimeStep out: true when convergence failure requires reverting to previous fields
 * @param velocityNorm  in/out: reference velocity norm, computed at it == 0 and reused afterwards
 * @return convergence flag. NOTE(review): ConvergedMomentum is never set to true
 *         because the CheckVelocityConvergence call below is commented out, so this
 *         always returns false — confirm this is intentional.
 */
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedMomentum = false;
    double NormDv = 0;
    fixedTimeStep = false;
    // build momentum system and solve for fractional step velocity increment
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);

    if (it == 0)
    {
        mpMomentumStrategy->InitializeSolutionStep();
        /* this->SetNeighboursVelocityId(); */
    }

    NormDv = mpMomentumStrategy->Solve();

    if (BaseType::GetEchoLevel() > 1 && Rank == 0)
        std::cout << "-------------- s o l v e d ! ------------------" << std::endl;

    if (it == 0)
    {
        velocityNorm = this->ComputeVelocityNorm();
    }

    double DvErrorNorm = NormDv / velocityNorm;
    // double DvErrorNorm = 0;
    // ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
    unsigned int iterationForCheck = 3;
    KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << std::endl;

    // Check convergence
    if (it == maxIt - 1)
    {
        std::cout << " iteration(" << it << ") Final Velocity error: " << DvErrorNorm << std::endl;
        fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
    }
    else if (it > iterationForCheck)
    {
        fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
    }

    // Debug dump of the convergence history at selected times (disabled):
    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
    //   myfile << it << "\t" << DvErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
    //   myfile << it << "\t" << DvErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
    //   myfile << it << "\t" << DvErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
    //   myfile << it << "\t" << DvErrorNorm << "\n";
    //   myfile.close();
    // }

    if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;

    return ConvergedMomentum;
}

/// Solve one iteration of the continuity (pressure) equation.
/**
 * Sets FRACTIONAL_STEP = 5, solves the pressure strategy and reports the
 * relative pressure error NormDp / NormP.
 * @param it    current nonlinear iteration index (0-based)
 * @param maxIt total number of nonlinear iterations
 * @param NormP in/out: reference pressure norm, computed at it == 0 and reused afterwards
 * @return true only when the last iteration flags the time step via FixTimeStepContinuity
 *         (the regular CheckPressureConvergence call is commented out).
 */
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedContinuity = false;
    double NormDp = 0;

    // 2. Pressure solution
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);

    if (it == 0)
    {
        mpPressureStrategy->InitializeSolutionStep();
    }

    NormDp = mpPressureStrategy->Solve();

    if (BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "The norm of pressure is: " << NormDp << std::endl;

    if (it == 0)
    {
        NormP=this->ComputePressureNorm();
    }

    double DpErrorNorm = NormDp / (NormP);
    // double DpErrorNorm = 0;
    // ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);

    // Check convergence
    if (it == maxIt - 1)
    {
        std::cout << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << std::endl;
        ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
    }
    else
    {
        std::cout << " iteration(" << it << ") Pressure error: " << DpErrorNorm << std::endl;
    }

    // Debug dump of the convergence history at selected times (disabled):
    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
    //   myfile << it << "\t" << DpErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
    //   myfile << it << "\t" << DpErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
    //   myfile << it << "\t" << DpErrorNorm << "\n";
    //   myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    //   std::ofstream myfile;
    //   myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
    //   myfile << it << "\t" << DpErrorNorm << "\n";
    //   myfile.close();
    // }

    if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;

    return ConvergedContinuity;
}

/// Check convergence of the velocity increment against mVelocityTolerance.
/**
 * Computes the global L2 norm of VELOCITY over all nodes (OpenMP reduction,
 * then MPI SumAll across ranks) and sets errorNormDv = NormDv / NormV.
 * @param NormDv      norm of the velocity increment from the momentum solve
 * @param errorNormDv out: relative velocity error
 * @return true when errorNormDv < mVelocityTolerance
 */
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
    errorNormDv = 0;

#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

            // NOTE(review): NormVelNode is accumulated but never used — dead local.
            double NormVelNode = 0;

            for (unsigned int d = 0; d < 3; ++d)
            {
                NormVelNode += Vel[d] * Vel[d];
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Accumulate partial norms over MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);

    NormV = sqrt(NormV);

    // Guard against division by zero for a zero velocity field.
    if (NormV == 0.0)
        NormV = 1.00;

    errorNormDv = NormDv / NormV;

    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    /* else{ */
    /*   std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
    /* } */

    if (errorNormDv < mVelocityTolerance)
    {
        return true;
    }
    else
    {
        return false;
    }
}

/// Compute the global L2 norm of the nodal VELOCITY field.
/**
 * OpenMP reduction over local nodes, MPI SumAll across ranks.
 * @return sqrt(sum |v|^2), clamped to 1.0 when the field is identically zero
 *         so it can be used safely as a relative-error denominator.
 */
double ComputeVelocityNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;

#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

            // NOTE(review): NormVelNode is accumulated but never used — dead local.
            double NormVelNode = 0;

            for (unsigned int d = 0; d < 3; ++d)
            {
                NormVelNode += Vel[d] * Vel[d];
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);

    NormV = sqrt(NormV);

    if (NormV == 0.0)
        NormV = 1.00;

    return NormV;
}

/// Compute the global L2 norm of the nodal PRESSURE field.
/**
 * OpenMP reduction over local nodes, MPI SumAll across ranks.
 * @return sqrt(sum p^2), clamped to 1.0 when the field is identically zero.
 */
double ComputePressureNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormP = 0.00;

#pragma omp parallel reduction(+ \
                               : NormP)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
            NormP += Pr * Pr;
        }
    }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);

    NormP = sqrt(NormP);

    if (NormP == 0.0)
        NormP = 1.00;

    return NormP;
}

/// Compute and append L2 error norms against an analytic benchmark solution
/// (manufactured velocity/pressure/stress fields on the unit square), writing
/// one line per time step into a set of errorL2*File.txt files.
/**
 * NOTE(review): the sumErrorL2* accumulators are written inside a
 * "#pragma omp parallel" region without a reduction clause or atomic update,
 * which is a data race when run with more than one thread — confirm whether
 * this diagnostic is only ever run serially.
 */
void ComputeErrorL2NormCaseImposedG()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];

    double sumErrorL2Velocity = 0;
    double sumErrorL2VelocityX = 0;
    double sumErrorL2VelocityY = 0;
    double sumErrorL2Pressure = 0;
    double sumErrorL2TauXX = 0;
    double sumErrorL2TauYY = 0;
    double sumErrorL2TauXY = 0;

#pragma omp parallel
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double posX = itNode->X();
            const double posY = itNode->Y();
            const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
            const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
            const double pressure = itNode->FastGetSolutionStepValue(PRESSURE);
            const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
            const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
            const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];

            // Analytic (manufactured) solution evaluated at the node position.
            double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            double expectedPressure = -posX * (1.0 - posX);

            double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2)));
            double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY));
            double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2));

            double nodalErrorVelocityX = velX - expectedVelocityX;
            double nodalErrorVelocityY = velY - expectedVelocityY;
            double nodalErrorPressure = pressure - expectedPressure;
            double nodalErrorTauXX = tauXX - expectedTauXX;
            double nodalErrorTauYY = tauYY - expectedTauYY;
            double nodalErrorTauXY = tauXY - expectedTauXY;

            // Area-weighted squared errors (nodal quadrature of the L2 norm).
            sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea;
            sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea;
            sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea;
            sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea;
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea;
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea;

            // itNode->FastGetSolutionStepValue(NODAL_ERROR_XX)=nodalErrorTauXX;
        }
    }

    double errorL2Velocity = sqrt(sumErrorL2Velocity);
    double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    double errorL2Pressure = sqrt(sumErrorL2Pressure);
    double errorL2TauXX = sqrt(sumErrorL2TauXX);
    double errorL2TauYY = sqrt(sumErrorL2TauYY);
    double errorL2TauXY = sqrt(sumErrorL2TauXY);

    // Append one "time <tab> error" line per quantity.
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
    myfileVelocity.close();

    std::ofstream myfileVelocityX;
    myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
    myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
    myfileVelocityX.close();

    std::ofstream myfileVelocityY;
    myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
    myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
    myfileVelocityY.close();

    std::ofstream myfilePressure;
    myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
    myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
    myfilePressure.close();

    std::ofstream myfileTauXX;
    myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
    myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
    myfileTauXX.close();

    std::ofstream myfileTauYY;
    myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
    myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
    myfileTauYY.close();

    std::ofstream myfileTauXY;
    myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
    myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
    myfileTauXY.close();
}

/// Compute and append L2 error norms against the analytic circular Couette
/// (Taylor–Couette) solution, writing one line per time step into
/// errorL2Poiseuille.txt. Benchmark parameters (r_in, R_out, omega, viscosity)
/// are hard-coded below.
/**
 * NOTE(review): as in ComputeErrorL2NormCaseImposedG, the sum accumulators are
 * updated inside "#pragma omp parallel" without a reduction — data race with
 * more than one thread. Also writes NODAL_ERROR_XX as a side effect.
 */
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];

    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;

    // Benchmark geometry and material parameters (hard-coded).
    double r_in = 0.2;
    double R_out = 0.5;
    double kappa = r_in / R_out;
    double omega = 0.5;
    double viscosity = 100.0;

#pragma omp parallel
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double posX = itNode->X();
            const double posY = itNode->Y();
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
            const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
            const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
            const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
            const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];

            // Analytic tangential velocity and shear stress of the annular flow.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;

            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;

            itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta;

            // Debug printout near the axes (disabled):
            // if(posY>-0.01 && posY<0.01){
            //   std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl;
            //   std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl;
            //   std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl;
            //   std::cout<<"\n ";
            // }
            // if(posX>-0.01 && posX<0.01){
            //   std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl;
            //   std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl;
            //   std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl;
            //   std::cout<<"\n ";
            // }

            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea;
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea;
        }
    }

    double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    double errorL2TauTheta = sqrt(sumErrorL2TauTheta);

    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}

/// Check convergence of the pressure increment against mPressureTolerance.
/**
 * Computes the global L2 norm of PRESSURE (serial node loop — the OpenMP
 * version is commented out — then MPI SumAll) and sets
 * errorNormDp = NormDp / NormP.
 * @param NormDp      norm of the pressure increment from the continuity solve
 * @param errorNormDp out: relative pressure error
 * @return true when errorNormDp < mPressureTolerance
 */
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormP = 0.00;
    errorNormDp = 0;

    // #pragma omp parallel reduction(+:NormP)
    // {
    ModelPart::NodeIterator NodeBegin;
    ModelPart::NodeIterator NodeEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
    for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
    {
        const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
        NormP += Pr * Pr;
    }
    // }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);

    NormP = sqrt(NormP);

    // Guard against division by zero for a zero pressure field.
    if (NormP == 0.0)
        NormP = 1.00;

    errorNormDp = NormDp / NormP;

    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << " The norm of pressure is: " << NormP << std::endl;
        std::cout << " Pressure error: " << errorNormDp << std::endl;
    }
    /* else{ */
    /*   std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */
    /* } */

    if (errorNormDp < mPressureTolerance)
    {
        return true;
    }
    else
        return false;
}

/// Decide, at the end of the momentum iterations, whether the step must be
/// redone: flags BAD_VELOCITY_CONVERGENCE and, for a severe failure, restores
/// the previous-step VELOCITY/PRESSURE/ACCELERATION fields.
/**
 * @param DvErrorNorm relative velocity error from the last momentum iteration
 * @return true when the previous fields were restored (step must be repeated)
 * NOTE(review): the sub-expression (DvErrorNorm < 0 && DvErrorNorm > 0) is
 * always false; the intended NaN check is already covered by
 * (DvErrorNorm != DvErrorNorm) — confirm and simplify upstream.
 */
bool FixTimeStepMomentum(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool fixedTimeStep = false;
    // Relax the tolerance during the first few (transient start-up) steps.
    if (currentTime < 3 * timeInterval)
    {
        minTolerance = 10;
    }

    if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
            // #pragma omp parallel
            // {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                // Roll back to the previous time-step fields.
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
            // }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}

/// Mid-loop divergence monitor: when the relative velocity error exceeds
/// ~1 (0.99999), flags BAD_VELOCITY_CONVERGENCE and restores the previous-step
/// VELOCITY/PRESSURE/ACCELERATION fields (in parallel).
/**
 * @param DvErrorNorm relative velocity error of the current iteration
 * @return true when the previous fields were restored
 * NOTE(review): same always-false (DvErrorNorm < 0 && DvErrorNorm > 0)
 * sub-expression as in FixTimeStepMomentum.
 */
bool CheckMomentumConvergence(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.99999;
    bool fixedTimeStep = false;

    if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                // Roll back to the previous time-step fields.
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}

/// Decide, at the end of the continuity iterations, whether the step must be
/// flagged: sets BAD_PRESSURE_CONVERGENCE accordingly (no field rollback here).
/**
 * @param DvErrorNorm relative pressure error from the last continuity iteration
 * @return true when the step was flagged as not converged
 * NOTE(review): same always-false (DvErrorNorm < 0 && DvErrorNorm > 0)
 * sub-expression as in FixTimeStepMomentum.
 */
bool FixTimeStepContinuity(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool fixedTimeStep = false;
    // Relax the tolerance during the first few (transient start-up) steps.
    if (currentTime < 3 * timeInterval)
    {
        minTolerance = 10;
    }

    if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        fixedTimeStep = true;
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    }
    return fixedTimeStep;
}

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

// private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

// Convergence tolerance for the momentum (velocity) solve.
double mVelocityTolerance;

// Convergence tolerance for the continuity (pressure) solve.
double mPressureTolerance;

// Maximum number of pressure iterations (read from the solver settings).
unsigned int mMaxPressureIter;

// Spatial dimension of the problem (2 or 3).
unsigned int mDomainSize;

// Time-integration order (1 or 2), see SetTimeCoefficients.
unsigned int mTimeOrder;

// Whether the DOF set is rebuilt each step (read from the solver settings).
bool mReformDofSet;

// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
 * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
 * 4 : Pressure step
 * 5 : Computation of projections
 * 6 : End of step velocity
 */
// unsigned int mStepId;

/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;

/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/// Read the solver configuration and set up the momentum and pressure
/// sub-strategies; throws when either strategy is missing from the settings.
void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;

    mTimeOrder = rSolverConfig.GetTimeOrder();

    // Check that input parameters are reasonable and sufficient.
    this->Check();

    //ModelPart& rModelPart = this->GetModelPart();

    mDomainSize = rSolverConfig.GetDomainSize();

    mReformDofSet = rSolverConfig.GetReformDofSet();

    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

    // Initialize strategies for each step
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);

    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
        /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }

    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);

    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }

    // Check input parameters
    this->Check();

    KRATOS_CATCH("");
}

///@}
///@name Private Access
///@{
///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) {} /// Copy constructor. NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {} ///@} }; /// Class NodalTwoStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
opencl_encfs_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_encfs; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_encfs); #else #include <string.h> #include <openssl/opensslv.h> #include <openssl/crypto.h> #include <openssl/ssl.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/hmac.h> #include <openssl/engine.h> #include "common-opencl.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "encfs_common.h" #include "options.h" #include "misc.h" #define OUTLEN (32 + 16) #include "opencl_pbkdf2_hmac_sha1.h" #define FORMAT_LABEL "encfs-opencl" #define FORMAT_NAME "EncFS" #define OCL_ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define CPU_ALGORITHM_NAME " AES/Blowfish" #define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(*cur_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN MEM_ALIGN_WORD /* This handles all widths */ #define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width) static int *cracked; static int any_cracked; static const int KEY_CHECKSUM_BYTES = 4; static encfs_common_custom_salt *cur_salt; static struct fmt_tests tests[] = { {"$encfs$192*181474*0*20*f1c413d9a20f7fdbc068c5a41524137a6e3fb231*44*9c0d4e2b990fac0fd78d62c3d2661272efa7d6c1744ee836a702a11525958f5f557b7a973aaad2fd14387b4f", "openwall"}, 
{"$encfs$128*181317*0*20*e9a6d328b4c75293d07b093e8ec9846d04e22798*36*b9e83adb462ac8904695a60de2f3e6d57018ccac2227251d3f8fc6a8dd0cd7178ce7dc3f", "Jupiter"}, {"$encfs$256*714949*0*20*472a967d35760775baca6aefd1278f026c0e520b*52*ac3b7ee4f774b4db17336058186ab78d209504f8a58a4272b5ebb25e868a50eaf73bcbc5e3ffd50846071c882feebf87b5a231b6", "Valient Gough"}, {"$encfs$256*120918*0*20*e6eb9a85ee1c348bc2b507b07680f4f220caa763*52*9f75473ade3887bca7a7bb113fbc518ffffba631326a19c1e7823b4564ae5c0d1e4c7e4aec66d16924fa4c341cd52903cc75eec4", "Alo3San1t@nats"}, {NULL} }; static size_t key_buf_size; static unsigned int *inbuffer; static pbkdf2_out *output; static pbkdf2_salt currentsalt; static cl_mem mem_in, mem_out, mem_salt, mem_state; static size_t key_buf_size; static int new_keys; static struct fmt_main *self; static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final; #define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width) /* * HASH_LOOPS is ideally made by factors of (iteration count - 1) and should * be chosen for a kernel duration of not more than 200 ms */ #define HASH_LOOPS (3 * 251) #define ITERATIONS 181474 /* Just for auto tune */ #define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS) #define STEP 0 #define SEED 128 static const char * warn[] = { "P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: " }; static int split_events[] = { 2, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/*
 * Tightest work-group size limit across the three PBKDF2 kernels; the
 * autotuner must not pick a LWS any kernel would reject.
 */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

#if 0
struct fmt_main *me;
#endif

/*
 * Allocate host- and device-side buffers for `gws` work items (scaled by the
 * vector width) and bind the kernel arguments that never change per batch.
 * Called by the autotuner for each candidate GWS.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = 64 * gws;

	/// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

/*
 * Release everything create_clobj() allocated. `cracked` doubles as the
 * "buffers exist" sentinel so this is safe to call more than once.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(output);
		MEM_FREE(cracked);
	}
}

/* Format teardown: undo reset() once autotuning has happened. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/*
 * One-time format init: pick the vector width for this device and, when
 * vectorized, advertise it in the algorithm name.
 */
static void init(struct fmt_main *_self)
{
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo),
		         OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
		self->params.algorithm_name = valgo;
	}
}

/* Build the PBKDF2 kernels and run the auto-tuner (first call only). */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2*HASH_LOOPS, split_events, warn,
		                       2, self, create_clobj, release_clobj,
		                       ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
		             (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	}
}

/*
 * Install a salt for the coming crypt_all() batches: cache it on the host
 * and push the PBKDF2 parameters (salt, iterations, output length) to the
 * device. outlen = keySize + ivLength (key material + IV in one derivation).
 */
static void set_salt(void *salt)
{
	cur_salt = (encfs_common_custom_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltLen);
	currentsalt.length = cur_salt->saltLen;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->keySize + cur_salt->ivLength;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0,
	               sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL),
	               "Copy salt to gpu");
}

/* Zero the candidate buffer so GETPOS slots from shorter keys don't linger. */
static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/* Scatter one plaintext candidate into the interleaved transfer buffer. */
static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Gather a candidate back out of the transfer buffer (inverse of set_key). */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/*
 * Batch driver: run PBKDF2-HMAC-SHA1 on the device (init, HASH_LOOPS-sized
 * loop slices so the GPU stays responsive, then final per 20-byte output
 * chunk), read the derived keys back, and verify each one on the CPU by
 * decrypting the stored EncFS volume key and comparing its 32-bit MAC to the
 * embedded checksum.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i, j, index;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		              key_buf_size, inbuffer, 0, NULL,
		              multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL,
	              &global_work_size, lws, 0, NULL,
	              multi_profilingEvent[1]), "Run initial kernel");

	for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1,
			              NULL, &global_work_size, lws, 0, NULL,
			              multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
			opencl_process_event();
		}

		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1,
		              NULL, &global_work_size, lws, 0, NULL,
		              multi_profilingEvent[3]), "Run intermediate kernel");
	}

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
	              sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL,
	              multi_profilingEvent[4]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			int i;
			unsigned char master[MAX_KEYLENGTH + MAX_IVLENGTH];
			unsigned char tmpBuf[cur_salt->dataLen];
			unsigned int checksum = 0;
			unsigned int checksum2 = 0;

			memcpy(master, output[index].dk,
			       cur_salt->keySize + cur_salt->ivLength);

			// First N bytes are checksum bytes.
			for (i = 0; i < KEY_CHECKSUM_BYTES; ++i)
				checksum = (checksum << 8) | (unsigned int)cur_salt->data[i];
			memcpy(tmpBuf, cur_salt->data + KEY_CHECKSUM_BYTES,
			       cur_salt->keySize + cur_salt->ivLength);
			encfs_common_streamDecode(cur_salt, tmpBuf,
			       cur_salt->keySize + cur_salt->ivLength ,checksum, master);
			checksum2 = encfs_common_MAC_32(cur_salt, tmpBuf,
			       cur_salt->keySize + cur_salt->ivLength, master);
			if (checksum2 == checksum) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}

	return count;
}

/* Cheap batch-level check: anything cracked at all? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all() already did the full checksum verification. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_encfs = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		encfs_common_valid,
		fmt_default_split,
		fmt_default_binary,
		encfs_common_get_salt,
		{
			encfs_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
truedeplinear-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Linear equation as array subscription
#include <stdlib.h>

/*
 * DataRaceBench kernel "truedeplinear-var-yes": the parallel loop below
 * INTENTIONALLY contains a data race (the "-yes" in the file name marks it
 * as a positive test case for race detectors). Each iteration i writes
 * a[2*i+1] while reading a[i]; for odd i < len/2 the element read can also
 * be written by another iteration (e.g. i=1 writes a[3], i=3 reads a[3]),
 * a true loop-carried dependence. Do NOT "fix" this — the race is the point.
 */
int main(int argc, char* argv[])
{
  int i;
  // Array length; variable (overridable from argv), hence "-var" in the name.
  int len=2000;

  if (argc>1)
    len = atoi(argv[1]);
  // NOTE(review): atoi gives 0 on garbage input; a[0] would be a zero-length
  // VLA. Benchmark harness presumably passes sane values — left as-is.
  int a[len];

  // Sequential initialization: a[i] = i.
  for (i=0; i<len; i++)
    a[i]=i;

  // Racy region: write a[2*i+1] / read a[i] across threads (see header note).
#pragma omp parallel for
  for (i=0;i<len/2;i++)
    a[2*i+1]=a[i]+1;

  return 0;
}
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. 
% % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 524288UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x1, x2, y; /* Check boundary conditions. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait == UndefinedPixelTrait) && (draw_info->fill.alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. */ floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. 
*/ status=MagickTrue; start=0; s=segment_stack; PushSegmentStack(y_offset,x_offset,x_offset,1); PushSegmentStack(y_offset+1,x_offset,x_offset,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(floodplane_image,image,floodplane_image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Tile fill color onto floodplane. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelViaPixelInfo(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. % % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelInfo *start_color, % const PixelInfo *stop_color,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /*
    Default vector: top-left to bottom-right; may be overridden by the
    gradient:direction, gradient:angle or gradient:vector artifacts below.
  */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Fix: was image->columns-1, a copy-paste error; a south-directed
            gradient runs top-to-bottom so its y extent is the row count
            (mirror of the NorthGravity case above).
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no explicit direction, a linear gradient defaults to vertical.
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) image->columns*cosine)+
        fabs((double) image->rows*sine);
      gradient->gradient_vector.x1=0.5*(image->columns-distance*cosine);
      gradient->gradient_vector.y1=0.5*(image->rows-distance*sine);
      gradient->gradient_vector.x2=0.5*(image->columns+distance*cosine);
      gradient->gradient_vector.y2=0.5*(image->rows+distance*sine);
    }
  gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt(image->columns*image->columns+
            image->rows*image->rows))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) image->columns/2.0;
          gradient->radii.y=(double) image->rows/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin(image->columns,image->rows))/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /*
        Fix: release draw_info before throwing — it leaked on this OOM path.
      */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops*
    sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel=GetPixelChannelChannel(linear_image,i); PixelTrait traits=GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if (((paint_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(linear_image,p) == 0)) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() 
changes any pixel that matches color with the color
%  defined by fill argument.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
%        const PixelInfo *fill,const MagickBooleanType invert,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* DirectClass storage is required so pixels can be rewritten in place. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Conform fill/target to this image's colorspace before comparing. */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip pixels excluded by the image's write mask. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* Replace when the fuzzy match result agrees with the requested
         sense; `invert` flips which pixels are painted. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        SetPixelViaPixelInfo(image,&conform_fill,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImage)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.
For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const PixelInfo *target,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* An alpha channel is required before per-pixel opacity can be set. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip pixels excluded by the image's write mask. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* Set alpha when the fuzzy match agrees with the requested sense;
         `invert` flips which pixels are made transparent. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for the all the channels, TransparentPaintImage()
%  is not suitable for the operations like chroma, where the tolerance for
%  similarity of two color component (RGB) can be different.
Thus we define
%  this method to take two target pixels (one low and one high) and all the
%  pixels of an image which are lying between these two pixels are made
%  transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* An alpha channel is required before per-pixel opacity can be set. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip pixels excluded by the image's write mask. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* A pixel matches when every RGB component lies within the inclusive
         [low, high] box; each channel has its own tolerance. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
fill_r_4c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
  
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
  
       http://www.apache.org/licenses/LICENSE-2.0
  
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "gto/gto.h"

/*
 * out[naoi,naoj,naok,comp] in F-order
 *
 * NOTE(review): the dims[] array below is actually {naoi, naoj, naok, naol},
 * so the fourth axis appears to be the l-shell AO dimension rather than
 * comp — confirm against callers.
 *
 * Fills the (ish, jsh) slab of the 4-center integral tensor: for the fixed
 * bra shell pair it loops over all (ksh, lsh) ket shell pairs in the slice
 * and invokes the integral routine `intor` for each shell quartet, writing
 * directly into `out` at the F-order offset of that quartet.  `buf` is
 * scratch (cache) space for `intor`.
 */
void GTOr4c_fill_s1(int (*intor)(), double complex *out, double *buf,
                    int comp, int ish, int jsh,
                    int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int lsh0 = shls_slice[6];
        const int lsh1 = shls_slice[7];
        /* AO counts per axis from the cumulative AO-offset table ao_loc. */
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t naol = ao_loc[lsh1] - ao_loc[lsh0];
        const size_t nij = naoi * naoj;
        const int dims[] = {naoi, naoj, naok, naol};

        /* ish/jsh arrive relative to the slice; shift to absolute shells. */
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* Advance out to the (ip, jp, 0, 0) corner of this bra pair. */
        out += jp * naoi + ip;

        int ksh, lsh, k0, l0;
        int shls[4];
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                /* F-order offset of the (k0, l0) block: (k0 + l0*naok)*nij. */
                (*intor)(out+(l0*naok+k0)*nij, dims, shls,
                         atm, natm, bas, nbas, env, cintopt, buf);
        } }
}

/*
 * Driver: parallelizes the fill over all (ish, jsh) bra shell pairs in the
 * slice.  Each OpenMP thread owns a private scratch buffer sized by
 * GTOmax_cache_size; distinct (ish, jsh) pairs write disjoint regions of
 * eri, so no synchronization is needed.
 * NOTE(review): the malloc return is not checked — a failed allocation
 * would crash inside fill.  `prescreen` is accepted but unused here.
 */
void GTOr4c_drv(int (*intor)(), void (*fill)(), int (*prescreen)(),
                double complex *eri, int comp,
                int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        int ish, jsh, ij;
        double *buf = malloc(sizeof(double) * cache_size);
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh,
                        shls_slice, ao_loc, cintopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
GB_unop__round_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__round_fc32_fc32)
// op(A') function:  GB (_unop_tran__round_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_croundf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_croundf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = GB_croundf (z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies GB_croundf element-wise over anz entries.  When Ab is NULL the
// matrix is full/sparse-dense and every position is processed; otherwise Ab
// is the bitmap and zero entries are skipped.  Both loops are embarrassingly
// parallel (each p writes only Cx [p]).

GrB_Info GB (_unop_apply__round_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_croundf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_croundf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated unary ops and is
// pulled in via GB_unop_transpose.c, driven by the macros defined above.

GrB_Info GB (_unop_tran__round_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
2426.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute parallel for dist_schedule(static, 1) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
nbody_parallel.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * Direct-summation N-body integrator (O(nparticles^2) per step) using
 * forward-Euler updates, parallelized with OpenMP.  Reads masses, positions
 * and velocities from a parameter file and prints the positions after every
 * step.
 */
int main(int argc, char *argv[])
{
    long long nsteps;
    long long nparticles;
    double *m;
    double *x, *y, *z;          /* current positions */
    double *x_tmp, *y_tmp, *z_tmp;  /* next-step positions (double buffer) */
    double *vx, *vy, *vz;       /* velocities */
    double G;
    double dt;
    double dx, dy, dz;
    double r, a;
    double softening;
    long long n, i, j;
    FILE *f;

    if (argc < 6) {
        puts("Please specify arguments correctly.\n\n"
             "Usage: nbody <nsteps> <dt> <G> <softening> <PARAMETERS_FILE>\n\n"
             "PARAMETERS_FILE format:\n"
             "    nparticles (on the first line)\n"
             "    m x y z vx vy vz (on each line after the first line)");
        return 0x1;
    }

    nsteps = atoll(argv[1]);
    dt = atof(argv[2]);
    G = atof(argv[3]);
    softening = atof(argv[4]);

    f = fopen(argv[5], "r");
    if (!f) {
        printf("Can't open the PARAMETERS_FILE: %s\n", argv[5]);
        return 0x2;
    }

    /* NOTE(review): fscanf and malloc return values are unchecked below;
       a short/garbled file or allocation failure leads to uninitialized
       reads — consider validating. */
    fscanf(f, "%lld", &nparticles);

    m = (double *)malloc(nparticles * sizeof(double));
    x = (double *)malloc(nparticles * sizeof(double));
    y = (double *)malloc(nparticles * sizeof(double));
    z = (double *)malloc(nparticles * sizeof(double));
    x_tmp = (double *)malloc(nparticles * sizeof(double));
    y_tmp= (double *)malloc(nparticles * sizeof(double));
    z_tmp = (double *)malloc(nparticles * sizeof(double));
    vx = (double *)malloc(nparticles * sizeof(double));
    vy = (double *)malloc(nparticles * sizeof(double));
    vz = (double *)malloc(nparticles * sizeof(double));

    puts("Initial parameters:");
    for (i = 0; i < nparticles; i++) {
        fscanf(f, "%lf %lf %lf %lf %lf %lf %lf",
               &m[i], &x[i], &y[i], &z[i], &vx[i], &vy[i], &vz[i]);
        printf("%lld : m = %lg x = %lg y = %lg z = %lg "
               "vx = %lg vy = %lg vz = %lg\n",
               i, m[i], x[i], y[i], z[i], vx[i], vy[i], vz[i]);
    }
    fclose(f);

    puts("n = 0");
    for (i = 0; i < nparticles; i++)
        printf("%lld : %lg %lg %lg\n", i, x[i], y[i], z[i]);

    /* One parallel region spans all timesteps; per-step synchronization
       relies on the implicit barriers at the end of each `omp for`. */
    #pragma omp parallel private(n, j, dx, dy, dz, r, a)
    for (n = 1; n <= nsteps; n++) {
        /* Phase 1: write new positions into the *_tmp buffers and update
           velocities; x/y/z are only read here, so the master thread may
           still be printing them (see below) without a race. */
        #pragma omp for
        for (i = 0; i < nparticles; i++) {
            x_tmp[i] = x[i] + vx[i] * dt;
            y_tmp[i] = y[i] + vy[i] * dt;
            z_tmp[i] = z[i] + vz[i] * dt;

            for (j = 0; j < nparticles; j++) {
                dx = x[j] - x[i];
                dy = y[j] - y[i];
                dz = z[j] - z[i];
                /* `softening` is added to the squared distance before the
                   square root (acts as an epsilon^2 term, and makes the
                   j == i self-interaction finite). */
                r = sqrt(dx * dx + dy * dy + dz * dz + softening);
                a = G * m[j] / (r * r * r) * dt;

                vx[i] += a * dx;
                vy[i] += a * dy;
                vz[i] += a * dz;
            }
        }

        /* Phase 2: publish the new positions. */
        #pragma omp for
        for (i = 0; i < nparticles; i++) {
            x[i] = x_tmp[i];
            y[i] = y_tmp[i];
            z[i] = z_tmp[i];
        }

        /* `master` has no implicit barrier: other threads may already run
           the next step's phase 1, but that phase only reads x/y/z, and no
           thread can reach phase 2 (which writes x/y/z) until the master
           joins phase 1's closing barrier — so printing here is race-free. */
        #pragma omp master
        {
            printf("n = %lld\n", n);
            for (i = 0; i < nparticles; i++)
                printf("%lld : %lg %lg %lg\n", i, x[i], y[i], z[i]);
        }
    }

    free(m);
    free(x);
    free(y);
    free(z);
    free(x_tmp);
    free(y_tmp);
    free(z_tmp);
    free(vx);
    free(vy);
    free(vz);

    return 0;
}
core_ztsmqr.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_tsmqr * * Overwrites the general m1-by-n1 tile A1 and * m2-by-n2 tile A2 with * * side = PlasmaLeft side = PlasmaRight * trans = PlasmaNoTrans Q * | A1 | | A1 A2 | * Q * | A2 | * * trans = Plasma_ConjTrans Q^H * | A1 | | A1 A2 | * Q^H * | A2 | * * where Q is a complex unitary matrix defined as the product of k * elementary reflectors * * Q = H(1) H(2) . . . H(k) * * as returned by plasma_core_ztsqrt. * ******************************************************************************* * * @param[in] side * - PlasmaLeft : apply Q or Q^H from the Left; * - PlasmaRight : apply Q or Q^H from the Right. * * @param[in] trans * - PlasmaNoTrans : Apply Q; * - Plasma_ConjTrans : Apply Q^H. * * @param[in] m1 * The number of rows of the tile A1. m1 >= 0. * * @param[in] n1 * The number of columns of the tile A1. n1 >= 0. * * @param[in] m2 * The number of rows of the tile A2. m2 >= 0. * m2 = m1 if side == PlasmaRight. * * @param[in] n2 * The number of columns of the tile A2. n2 >= 0. * n2 = n1 if side == PlasmaLeft. * * @param[in] k * The number of elementary reflectors whose product defines * the matrix Q. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A1 * On entry, the m1-by-n1 tile A1. * On exit, A1 is overwritten by the application of Q. * * @param[in] lda1 * The leading dimension of the array A1. lda1 >= max(1,m1). * * @param[in,out] A2 * On entry, the m2-by-n2 tile A2. * On exit, A2 is overwritten by the application of Q. * * @param[in] lda2 * The leading dimension of the tile A2. lda2 >= max(1,m2). 
*
 * @param[in] V
 *          The i-th row must contain the vector which defines the
 *          elementary reflector H(i), for i = 1,2,...,k, as returned by
 *          plasma_core_ZTSQRT in the first k columns of its array argument V.
 *
 * @param[in] ldv
 *          The leading dimension of the array V. ldv >= max(1,k).
 *
 * @param[in] T
 *          The ib-by-k triangular factor T of the block reflector.
 *          T is upper triangular by block (economic storage);
 *          The rest of the array is not referenced.
 *
 * @param[in] ldt
 *          The leading dimension of the array T. ldt >= ib.
 *
 * @param work
 *          Auxiliary workspace array of length
 *          ldwork-by-n1 if side == PlasmaLeft
 *          ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *          The leading dimension of the array work.
 *              ldwork >= max(1,ib) if side == PlasmaLeft
 *              ldwork >= max(1,m1) if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_ztsmqr(plasma_enum_t side, plasma_enum_t trans,
                       int m1, int n1, int m2, int n2, int k, int ib,
                             plasma_complex64_t *A1,   int lda1,
                             plasma_complex64_t *A2,   int lda2,
                       const plasma_complex64_t *V,    int ldv,
                       const plasma_complex64_t *T,    int ldt,
                             plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m1 < 0) {
        plasma_coreblas_error("illegal value of m1");
        return -3;
    }
    if (n1 < 0) {
        plasma_coreblas_error("illegal value of n1");
        return -4;
    }
    if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
        plasma_coreblas_error("illegal value of m2");
        return -5;
    }
    if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
        plasma_coreblas_error("illegal value of n2");
        return -6;
    }
    if (k < 0 ||
        (side == PlasmaLeft  && k > m1) ||
        (side == PlasmaRight && k > n1)) {
        plasma_coreblas_error("illegal value of k");
        return -7;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -8;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -9;
    }
    if (lda1 < imax(1, m1)) {
        plasma_coreblas_error("illegal value of lda1");
        return -10;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -11;
    }
    if (lda2 < imax(1, m2)) {
        plasma_coreblas_error("illegal value of lda2");
        return -12;
    }
    if (V == NULL) {
        plasma_coreblas_error("NULL V");
        return -13;
    }
    if (ldv < imax(1, side == PlasmaLeft ? m2 : n2)) {
        plasma_coreblas_error("illegal value of ldv");
        return -14;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -15;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -16;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -17;
    }
    if (ldwork < imax(1, side == PlasmaLeft ? ib : m1)) {
        plasma_coreblas_error("illegal value of ldwork");
        return -18;
    }

    // quick return
    if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the ib-wide reflector blocks:
    // forward (i1=0, step +ib) when applying Q^H from the left or Q from the
    // right; backward (start at the last block, step -ib) otherwise.
    int i1, i3;
    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);
        int ic = 0;
        int jc = 0;
        int mi = m1;
        int ni = n1;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = m1 - i;
            ic = i;
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = n1 - i;
            jc = i;
        }

        // Apply H or H^H (NOTE: plasma_core_zparfb used to be core_ztsrfb).
        plasma_core_zparfb(side, trans, PlasmaForward, PlasmaColumnwise,
                           mi, ni, m2, n2, kb, 0,
                           &A1[lda1*jc+ic], lda1,
                           A2, lda2,
                           &V[ldv*i], ldv,
                           &T[ldt*i], ldt,
                           work, ldwork);
    }
    return PlasmaSuccess;
}

/******************************************************************************/
// Task wrapper: schedules plasma_core_ztsmqr() as an OpenMP task with the
// data dependences needed by the tile QR factorization (A1, A2 in/out;
// V, T read-only).  Errors are reported through the sequence/request pair.
void plasma_core_omp_ztsmqr(plasma_enum_t side, plasma_enum_t trans,
                            int m1, int n1, int m2, int n2, int k, int ib,
                                  plasma_complex64_t *A1, int lda1,
                                  plasma_complex64_t *A2, int lda2,
                            const plasma_complex64_t *V,  int ldv,
                            const plasma_complex64_t *T,  int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n1]) \
                     depend(inout:A2[0:lda2*n2]) \
                     depend(in:V[0:ldv*k]) \
                     depend(in:T[0:ib*k])
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: each thread owns a preallocated slot.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? ib : m1;  // TODO: double check

            // Call the kernel.
            int info = plasma_core_ztsmqr(side, trans,
                                          m1, n1, m2, n2, k, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          V, ldv,
                                          T, ldt,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_ztsmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
measures_threads.c
#include <numtrd.h> #include <chaininghp.h> #include <migrch.h> #include <fitness/fitness.h> #include <config.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "fitness_private.h" #include "gyration.h" static FitnessCalc FIT_BUNDLE = {0, 0, NULL, 0, 0}; void FitnessCalc_initialize(const HPElem * chaininghp, int hpSize){ FIT_BUNDLE.chaininghp = chaininghp; FIT_BUNDLE.hpSize = hpSize; FIT_BUNDLE.maxGyration = calc_max_gyration(chaininghp, hpSize); } void FitnessCalc_cleanup(){ return; } /* Returns the FitnessCalc */ FitnessCalc FitnessCalc_get(){ return FIT_BUNDLE; } /* Counts the number of conflicts among the protein beads. */ static int count_collisions(const numtrd *beads, int nBeads){ int i, j; int collisions = 0; for(i = 0; i < nBeads; i++){ numtrd bead = beads[i]; // Check following backbone beads for(j = i+1; j < nBeads; j++){ if(numtrd_equal(bead, beads[j])) collisions++; } } return collisions; } /* Counts the number of contacts among the protein beads. 
*/ static int count_contacts(const numtrd *beads, int nBeads){ int i, j; int contacts = 0; for(i = 0; i < nBeads; i++){ numtrd bead = beads[i]; // Check following backbone beads for(j = i+1; j < nBeads; j++){ if(numtrd_isDist1(bead, beads[j])) contacts++; } } return contacts; } BeadMeasures proteinMeasures(const numtrd *BBbeads, const numtrd *SCbeads, const HPElem *chaininghp, int hpSize){ int i; // Create vectors with desired coordinates of beads numtrd *coordsAll = malloc(sizeof(numtrd) * hpSize * 2); int sizeAll = 0; numtrd *coordsBB = malloc(sizeof(numtrd) * hpSize); int sizeBB = 0; numtrd *coordsHB = malloc(sizeof(numtrd) * hpSize * 2); int sizeHB = 0; numtrd *coordsPB = malloc(sizeof(numtrd) * hpSize * 2); int sizePB = 0; numtrd *coordsHH = malloc(sizeof(numtrd) * hpSize); int sizeHH = 0; numtrd *coordsHP = malloc(sizeof(numtrd) * hpSize); int sizeHP = 0; numtrd *coordsPP = malloc(sizeof(numtrd) * hpSize); int sizePP = 0; for(i = 0; i < hpSize; i++){ coordsAll[sizeAll++] = BBbeads[i]; coordsBB[sizeBB++] = BBbeads[i]; coordsHB[sizeHB++] = BBbeads[i]; coordsPB[sizePB++] = BBbeads[i]; } for(i = 0; i < hpSize; i++){ coordsAll[sizeAll++] = SCbeads[i]; coordsHP[sizeHP++] = SCbeads[i]; if(chaininghp[i] == 'H'){ coordsHH[sizeHH++] = SCbeads[i]; coordsHB[sizeHB++] = SCbeads[i]; } else { coordsPP[sizePP++] = SCbeads[i]; coordsPB[sizePB++] = SCbeads[i]; } } BeadMeasures retval; #pragma omp parallel for schedule(dynamic, 1) for(i = 0; i < 7; i++){ switch(i){ case 0: retval.hh = count_contacts(coordsHH, sizeHH); break; case 1: retval.pp = count_contacts(coordsPP, sizePP); break; case 2: retval.hp = count_contacts(coordsHP, sizeHP) - retval.hh - retval.pp; // HP = all - HH - PP break; case 3: retval.bb = count_contacts(coordsBB, sizeBB); break; case 4: retval.hb = count_contacts(coordsHB, sizeHB) - retval.hh - retval.bb; // HB = all - HH - BB break; case 5: retval.pb = count_contacts(coordsPB, sizePB) - retval.pp - retval.bb; // PB = all - PP - BB break; case 6: 
retval.collisions = count_collisions(coordsAll, sizeAll); break; default: break; } } // Remove the trivial contacts retval.bb -= (hpSize - 1); retval.hb -= (sizeHH); retval.pb -= (sizePP); // Linearize amount of collisions and contacts retval.hh = sqrt(retval.hh); retval.pp = sqrt(retval.pp); retval.hp = sqrt(retval.hp); retval.bb = sqrt(retval.bb); retval.hb = sqrt(retval.hb); retval.pb = sqrt(retval.pb); retval.collisions = sqrt(retval.collisions); free(coordsAll); free(coordsBB); free(coordsHB); free(coordsPB); free(coordsHH); free(coordsHP); free(coordsPP); return retval; }
GB_unop__identity_bool_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_uint64)
// op(A') function:  GB (_unop_tran__identity_bool_uint64)

// C type:   bool
// A type:   uint64_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ;           \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_bool_uint64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared outside the loop for the OpenMP pragma
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in the bitmap
            uint64_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_bool_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands here using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif