source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unaryop__minv_uint16_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint16_int64
// op(A') function:  GB_tran__minv_uint16_int64

// C type:   uint16_t
// A type:   int64_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 16)

// type of the A matrix entries
#define GB_ATYPE \
    int64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = 1/x in 16-bit unsigned integer arithmetic
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting: declares a new uint16_t z initialized from x
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    GB_GETA (aij, Ax, pA) ;          \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;            \
    GB_OP (GB_CX (pC), x) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator to each of the anz entries of Ax, writing to Cx,
// parallelized over nthreads OpenMP threads.  Cx and Ax are dense arrays
// of the matrix entries; the pattern is unchanged by a unary op.
GrB_Info GB_unop__minv_uint16_int64
(
    uint16_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which uses the macros
// defined above (GB_CAST_OP etc.) to do the fused transpose+apply.
GrB_Info GB_tran__minv_uint16_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
makeYbus.h
/*
 * makYbus.cuh
 *
 * Created on: 23/09/2015
 * Author: Igor M. Araújo
 */

#ifndef MAKEYBUS_CUH_
#define MAKEYBUS_CUH_

#include <util/complexUtils.h>
#include <iostream>

using namespace std;

// NOTE(review): this header relies on file-scope globals declared elsewhere
// (H_NBRANCH, H_NBUS, H_NTESTS, H_THREADS, BASE_INDEX, D_NBRANCH, D_NBUS,
// the coo*/csr* buffers and nnz* counters, sparseHandle, stream[], pBuffer,
// pBufferSizeInBytes, permutation, descr*, d_estrutura, d_enxame, ...).
// Verify they are in scope wherever this header is included.

// Build the branch-bus connection matrices Cf ("from" side) and Ct ("to"
// side) in COO format: Cf(from(l), l) = 1 and Ct(to(l), l) = 1, using the
// index base expected by MKL (BASE_INDEX).
__host__ void mkl_computeCfCt(
        Branch *branches,
        cuDoubleComplex *cooValCf, int *cooRowCf, int *cooColCf,
        cuDoubleComplex *cooValCt, int *cooRowCt, int *cooColCt)
{
#pragma omp parallel for
    for (int id = 0; id < H_NBRANCH; id++) {
        Branch l_branch = branches[id];

        cooValCf[id] = make_cuDoubleComplex(1, 0);
        cooRowCf[id] = l_branch.from + BASE_INDEX;
        cooColCf[id] = id + BASE_INDEX;

        cooValCt[id] = make_cuDoubleComplex(1, 0);
        cooRowCt[id] = l_branch.to + BASE_INDEX;
        cooColCt[id] = id + BASE_INDEX;
    }
}

// Build the branch admittance matrices Yf and Yt (exactly 2 nonzeros per
// row, stored directly in CSR) and the diagonal shunt matrix Ysh, applying
// PSO particle overrides for shunt susceptance and transformer tap where a
// decision variable exists (indiceEstrutura != -1).
__host__ void mkl_computeYfYt(
        Bus *buses, Branch *branches,
        cuDoubleComplex *csrValYt, int *csrRowPtrYt, int *csrColIndYt,
        cuDoubleComplex *csrValYf, int *csrRowPtrYf, int *csrColIndYf,
        cuDoubleComplex *csrValYsh, int *csrRowPtrYsh, int *csrColIndYsh,
        vector<pso::Particula::Estrutura> estrutura, pso::Particula particula)
{
#pragma omp parallel for
    for (int id = 0; id < H_NBRANCH; id++) {
        if (id < H_NBUS) {
            Bus l_bus = buses[id];
            // Shunt susceptance: particle value when this bus carries an
            // SHC decision variable, otherwise the case value.
            double Bsh = (l_bus.indiceEstrutura != -1
                          && estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::SHC)
                         ? particula[l_bus.indiceEstrutura] : l_bus.Bsh;
            csrValYsh[id] = make_cuDoubleComplex(l_bus.Gsh, Bsh);
            csrRowPtrYsh[id] = id + BASE_INDEX;
            csrColIndYsh[id] = id + BASE_INDEX;
        }

        cuDoubleComplex Ytt;
        cuDoubleComplex Yff;
        cuDoubleComplex Yft;
        cuDoubleComplex Ytf;

        Branch l_branch = branches[id];
        int stat = (l_branch.inservice) ? 1 : 0;
        cuDoubleComplex impedance = make_cuDoubleComplex(l_branch.R, l_branch.X);
        cuDoubleComplex Ys = cuCdiv(make_cuDoubleComplex(stat, 0), impedance);
        cuDoubleComplex susceptance = make_cuDoubleComplex(0, l_branch.B);
        cuDoubleComplex Bc = cuCmul(make_cuDoubleComplex(stat, 0), susceptance);
        // BUG FIX: the original read particula[l_branch.indiceEstrutura]
        // whenever tap != 0, indexing particula[-1] for fixed-tap branches
        // with no decision variable.  Mirror the guarded logic of the CUDA
        // kernel hybrid_computeYfYt below.
        cuDoubleComplex tap = (l_branch.tap != 0)
                ? ((l_branch.indiceEstrutura != -1)
                        ? make_cuDoubleComplex(particula[l_branch.indiceEstrutura], 0)
                        : make_cuDoubleComplex(l_branch.tap, 0))
                : make_cuDoubleComplex(1, 0);
        cuDoubleComplex phase_shifter = make_cuDoubleComplex(0, M_PI / 180.0 * l_branch.shift);
        tap = cuCmul(tap, cuCexp(phase_shifter));

        Ytt = cuCadd(Ys, cuCdiv(Bc, make_cuDoubleComplex(2, 0)));
        Yff = cuCdiv(Ytt, cuCmul(tap, cuConj(tap)));
        Yft = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), cuConj(tap));
        Ytf = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), tap);

        int offsetTo, offsetFrom;

        // Row id of Yf: store the two entries with increasing column index.
        csrRowPtrYf[id] = id * 2 + BASE_INDEX;
        offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
        offsetFrom = 1 - offsetTo;
        csrColIndYf[id * 2 + offsetTo] = l_branch.to + BASE_INDEX;
        csrValYf[id * 2 + offsetTo] = Yft;
        csrColIndYf[id * 2 + offsetFrom] = l_branch.from + BASE_INDEX;
        csrValYf[id * 2 + offsetFrom] = Yff;

        // Row id of Yt.
        csrRowPtrYt[id] = id * 2 + BASE_INDEX;
        offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
        offsetFrom = 1 - offsetTo;
        csrColIndYt[id * 2 + offsetTo] = l_branch.to + BASE_INDEX;
        csrValYt[id * 2 + offsetTo] = Ytt;
        csrColIndYt[id * 2 + offsetFrom] = l_branch.from + BASE_INDEX;
        csrValYt[id * 2 + offsetFrom] = Ytf;

        // Close the CSR row-pointer arrays on the last iteration.
        // BUG FIX: the original incremented the loop variable `id` here;
        // modifying the loop variable inside an OpenMP `parallel for` body
        // is non-conforming (undefined behavior).  Use a local instead.
        if (id == (H_NBRANCH - 1)) {
            int last = id + 1;
            csrRowPtrYt[last] = last * 2 + BASE_INDEX;
            csrRowPtrYf[last] = last * 2 + BASE_INDEX;
            csrRowPtrYsh[H_NBUS] = H_NBUS + BASE_INDEX;
        }
    }
}

/* autor: Igor Araújo
 * Date : 03/02/2016
 * Description: Compute Admittance Matrix using MKL on the CPU:
 *              Ybus = Cf * Yf + Ct * Yt + Ysh
 */
__host__ void mkl_makeYbus(
        vector<pso::Particula::Estrutura> estrutura,
        pso::Particula particula,
        Bus* buses, Branch* branches)
{
    // #1 Matrix Cf and Ct is the same for all tests, so compute only once.
    // #1.1 Compute Matrix Cf and Ct in Coordinate Format (COO).
    mkl_computeCfCt(branches,
                    cooValCf, cooRowCf, cooColCf,
                    cooValCt, cooRowCt, cooColCt);

    // #1.2/#1.3 Sort Cf by row and convert COO -> CSR with mkl_zcsrcoo.
    // job[] controls the conversion (see the MKL manual, mkl_?csrcoo):
    //   job[0]=2 : COO -> CSR, column indices sorted within each row
    //   job[1]   : index base of the CSR matrix
    //   job[2]   : index base of the COO matrix
    //   job[4]   : nzmax, maximum number of non-zeros allowed
    //   job[5]=0 : fill acsr, ja and ia on output
    // NOTE(review): job[3] is never set; it is unused for this job choice.
    int job[6];
    job[0] = 2;
    job[1] = BASE_INDEX;
    job[2] = BASE_INDEX;
    job[4] = nnzCf;
    job[5] = 0;
    int info;
    MKL_ZCSRCOO((const int*) &job, (const int*) &H_NBUS,
                csrValCf, csrColIndCf, csrRowPtrCf,
                &nnzCf, cooValCf, cooRowCf, cooColCf, &info);
    if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }

    // #1.4/#1.5 Sort Ct by row and convert COO -> CSR.
    job[0] = 2;
    job[1] = BASE_INDEX;
    job[2] = BASE_INDEX;
    job[4] = nnzCt;
    job[5] = 0;
    MKL_ZCSRCOO((const int*) &job, (const int*) &H_NBUS,
                csrValCt, csrColIndCt, csrRowPtrCt,
                &nnzCt, cooValCt, cooRowCt, cooColCt, &info);
    if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }

    // #2 Compute Matrix Yf and Yt (and Ysh).
    mkl_computeYfYt(buses, branches,
                    csrValYt, csrRowPtrYt, csrColIndYt,
                    csrValYf, csrRowPtrYf, csrColIndYf,
                    csrValYsh, csrRowPtrYsh, csrColIndYsh,
                    estrutura, particula);

    // #3 Compute the admittance matrix: Ybus = Cf * Yf + Ct * Yt + Ysh.
    // Each product/sum uses MKL's multi-phase interface: request=1 computes
    // the row pointers (hence nnz), request=2 fills indices and values into
    // freshly allocated arrays.
    // NOTE(review): the original also issued a request=0 call after
    // request=2, redundantly recomputing the result; kept for fidelity.

    // #3.1 Compute Cf * Yf.
    {
        const char transa = 'N'; const int request = 1; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = 0;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCf, csrColIndCf, csrRowPtrCf,
                        csrValYf, csrColIndYf, csrRowPtrYf,
                        csrValCfYf, csrColIndCfYf, csrRowPtrCfYf,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
        nnzCfYf = csrRowPtrCfYf[m] - 1;
        csrColIndCfYf = (int*) MKL_malloc(sizeof(int) * nnzCfYf, 64);
        csrValCfYf = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCfYf, 64);
    }
    {
        const char transa = 'N'; const int request = 2; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = nnzCfYf;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCf, csrColIndCf, csrRowPtrCf,
                        csrValYf, csrColIndYf, csrRowPtrYf,
                        csrValCfYf, csrColIndCfYf, csrRowPtrCfYf,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }
    {
        const char transa = 'N'; const int request = 0; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = nnzCfYf;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCf, csrColIndCf, csrRowPtrCf,
                        csrValYf, csrColIndYf, csrRowPtrYf,
                        csrValCfYf, csrColIndCfYf, csrRowPtrCfYf,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }

    // #3.2 Compute Ct * Yt.
    {
        const char transa = 'N'; const int request = 1; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = nnzCtYt;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCt, csrColIndCt, csrRowPtrCt,
                        csrValYt, csrColIndYt, csrRowPtrYt,
                        csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
        nnzCtYt = csrRowPtrCtYt[m] - 1;
        csrColIndCtYt = (int*) MKL_malloc(sizeof(int) * nnzCtYt, 64);
        csrValCtYt = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCtYt, 64);
    }
    {
        const char transa = 'N'; const int request = 2; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = nnzCtYt;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCt, csrColIndCt, csrRowPtrCt,
                        csrValYt, csrColIndYt, csrRowPtrYt,
                        csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }
    {
        const char transa = 'N'; const int request = 0; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;
        const int nnz = nnzCtYt;
        MKL_ZCSRMULTCSR(&transa, &request, &sort, &m, &n, &k,
                        csrValCt, csrColIndCt, csrRowPtrCt,
                        csrValYt, csrColIndYt, csrRowPtrYt,
                        csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                        &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }

    // #3.3 Compute CfYf + CtYt.
    cuDoubleComplex scalar = make_cuDoubleComplex(1.0, 0);
    {
        const char transa = 'N'; const int request = 1; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzCfYfCtYt;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar,
                    csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
        nnzCfYfCtYt = csrRowPtrCfYfCtYt[m] - 1;
        csrColIndCfYfCtYt = (int*) MKL_malloc(sizeof(int) * nnzCfYfCtYt, 64);
        csrValCfYfCtYt = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCfYfCtYt, 64);
    }
    {
        const char transa = 'N'; const int request = 2; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzCfYfCtYt;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar,
                    csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }
    {
        const char transa = 'N'; const int request = 0; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzCfYfCtYt;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar,
                    csrValCtYt, csrColIndCtYt, csrRowPtrCtYt,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }

    // #3.4 Compute CfYfCtYt + Ysh -> Ybus.
    {
        const char transa = 'N'; const int request = 1; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzYbus;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar,
                    csrValYsh, csrColIndYsh, csrRowPtrYsh,
                    csrValYbus, csrColIndYbus, csrRowPtrYbus,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
        nnzYbus = csrRowPtrYbus[m] - 1;
        csrColIndYbus = (int*) MKL_malloc(sizeof(int) * nnzYbus, 64);
        csrValYbus = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzYbus, 64);
    }
    {
        const char transa = 'N'; const int request = 2; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzYbus;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar,
                    csrValYsh, csrColIndYsh, csrRowPtrYsh,
                    csrValYbus, csrColIndYbus, csrRowPtrYbus,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }
    {
        const char transa = 'N'; const int request = 0; const int sort = 0;
        const int m = H_NBUS; const int n = H_NBUS;
        const int nnz = nnzYbus;
        MKL_ZCSRADD(&transa, &request, &sort, &m, &n,
                    csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar,
                    csrValYsh, csrColIndYsh, csrRowPtrYsh,
                    csrValYbus, csrColIndYbus, csrRowPtrYbus,
                    &nnz, &info);
        if (info) { printf("MKL Library error in %s at %d", __FILE__, __LINE__); exit(1); }
    }

    // Release the intermediate products; Ybus keeps its buffers.
    MKL_free(csrColIndCfYf);
    MKL_free(csrValCfYf);
    MKL_free(csrColIndCtYt);
    MKL_free(csrValCtYt);
    MKL_free(csrColIndCfYfCtYt);
    MKL_free(csrValCfYfCtYt);
}

// GPU counterpart of mkl_computeCfCt (zero-based indices for cuSPARSE).
__global__ void hybrid_computeCfCt(
        Branch *branches,
        cuDoubleComplex *cooValCf, int *cooRowCf, int *cooColCf,
        cuDoubleComplex *cooValCt, int *cooRowCt, int *cooColCt)
{
    int id = ID();
    if (id < D_NBRANCH) {
        Branch l_branch = branches[id];

        cooValCf[id] = make_cuDoubleComplex(1, 0);
        cooRowCf[id] = l_branch.from;
        cooColCf[id] = id;

        cooValCt[id] = make_cuDoubleComplex(1, 0);
        cooRowCt[id] = l_branch.to;
        cooColCt[id] = id;
//      if(id == (D_NBRANCH -1 )){
//          id++;
//          cooColCt[id] = id;
//          cooColCf[id] = id;
//      }
    }
}

// GPU counterpart of mkl_computeYfYt (zero-based indices; particle data
// read from device arrays d_estrutura / d_enxame).
__global__ void hybrid_computeYfYt(
        Bus *buses, Branch *branches,
        cuDoubleComplex *csrValYt, int *csrRowPtrYt, int *csrColIndYt,
        cuDoubleComplex *csrValYf, int *csrRowPtrYf, int *csrColIndYf,
        cuDoubleComplex *csrValYsh, int *csrRowPtrYsh, int *csrColIndYsh,
        pso::Particula::Estrutura *d_estrutura, double *d_enxame)
{
    int id = ID();
    if (id < D_NBRANCH) {
        if (id < D_NBUS) {
            Bus l_bus = buses[id];
            double Bsh = (l_bus.indiceEstrutura != -1
                          && d_estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::SHC)
                         ? d_enxame[l_bus.indiceEstrutura] : l_bus.Bsh;
            csrValYsh[id] = make_cuDoubleComplex(l_bus.Gsh, Bsh);
            csrRowPtrYsh[id] = id;
            csrColIndYsh[id] = id;
        }

        cuDoubleComplex Ytt;
        cuDoubleComplex Yff;
        cuDoubleComplex Yft;
        cuDoubleComplex Ytf;

        Branch l_branch = branches[id];
        int stat = (l_branch.inservice) ? 1 : 0;
        cuDoubleComplex impedance = make_cuDoubleComplex(l_branch.R, l_branch.X);
        cuDoubleComplex Ys = cuCdiv(make_cuDoubleComplex(stat, 0), impedance);
        cuDoubleComplex susceptance = make_cuDoubleComplex(0, l_branch.B);
        cuDoubleComplex Bc = cuCmul(make_cuDoubleComplex(stat, 0), susceptance);
        cuDoubleComplex tap = (l_branch.tap != 0)
                ? ((l_branch.indiceEstrutura != -1)
                        ? make_cuDoubleComplex(d_enxame[l_branch.indiceEstrutura], 0)
                        : make_cuDoubleComplex(l_branch.tap, 0))
                : make_cuDoubleComplex(1, 0);
        cuDoubleComplex phase_shifter = make_cuDoubleComplex(0, M_PI / 180.0 * l_branch.shift);
        tap = cuCmul(tap, cuCexp(phase_shifter));

        Ytt = cuCadd(Ys, cuCdiv(Bc, make_cuDoubleComplex(2, 0)));
        Yff = cuCdiv(Ytt, cuCmul(tap, cuConj(tap)));
        Yft = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), cuConj(tap));
        Ytf = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), tap);

        int offsetTo, offsetFrom;

        csrRowPtrYf[id] = id * 2;
        offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
        offsetFrom = 1 - offsetTo;
        csrColIndYf[id * 2 + offsetTo] = l_branch.to;
        csrValYf[id * 2 + offsetTo] = Yft;
        csrColIndYf[id * 2 + offsetFrom] = l_branch.from;
        csrValYf[id * 2 + offsetFrom] = Yff;

        csrRowPtrYt[id] = id * 2;
        offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
        offsetFrom = 1 - offsetTo;
        csrColIndYt[id * 2 + offsetTo] = l_branch.to;
        csrValYt[id * 2 + offsetTo] = Ytt;
        csrColIndYt[id * 2 + offsetFrom] = l_branch.from;
        csrValYt[id * 2 + offsetFrom] = Ytf;

        // The thread handling the last branch also closes the row-pointer
        // arrays (id is a per-thread local here, so incrementing it is safe,
        // unlike in the OpenMP version above).
        if (id == (D_NBRANCH - 1)) {
            id++;
            csrRowPtrYt[id] = id * 2;
            csrRowPtrYf[id] = id * 2;
            csrRowPtrYsh[D_NBUS] = D_NBUS;
        }
    }
}

/* autor: Igor Araújo
 * Date : 03/02/2016
 * Description: Compute Admittance Matrix using a hybrid approach CPU and GPU,
 *              with cuSparse library.
 */
__host__ void hybrid_makeYbus(int nTest, int sizeEstrutura, Bus *buses, Branch *branches)
{
    // #1 Matrix Cf and Ct is the same for all tests: build only on the first.
    if (nTest == 0) {
        // #1.1 Compute Matrix Cf and Ct in Coordinate Format (COO).
        hybrid_computeCfCt<<<BLOCKS(H_NBRANCH, H_THREADS), H_THREADS, 0, stream[nTest]>>>(
                branches, cooValCf, cooRowCf, cooColCf, cooValCt, cooRowCt, cooColCt);

        // #1.2 Sort Matrix Cf by ROW.
        size_t before = pBufferSizeInBytes;
        checkCudaErrors(cusparseXcoosort_bufferSizeExt(sparseHandle, H_NBUS, H_NBRANCH,
                nnzCfcoo, cooRowCf, cooColCf, &pBufferSizeInBytes));
        // NOTE(review): if pBuffer was already allocated, the previous
        // buffer leaks here -- confirm pBuffer ownership/initialization.
        if (pBufferSizeInBytes > before) {
            checkCudaErrors(cudaMalloc((void**) &pBuffer, pBufferSizeInBytes * sizeof(char)));
        }
        checkCudaErrors(cusparseCreateIdentityPermutation(sparseHandle, nnzCfcoo, permutation));
        checkCudaErrors(cusparseXcoosortByRow(sparseHandle, H_NBUS, H_NBRANCH, nnzCfcoo,
                cooRowCf, cooColCf, permutation, pBuffer));
        checkCudaErrors(cusparseZgthr(sparseHandle, nnzCfcoo, cooValCf, csrValCf,
                permutation, CUSPARSE_INDEX_BASE_ZERO));

        // #1.3 Convert Matrix Cf from COO to CSR.
        checkCudaErrors(cusparseXcoo2csr(sparseHandle, (const int*) cooRowCf, nnzCf, H_NBUS,
                csrRowPtrCf, CUSPARSE_INDEX_BASE_ZERO));
        checkCudaErrors(cudaMemcpy(csrColIndCf, cooColCf, nnzCf * sizeof(int),
                cudaMemcpyDeviceToDevice));

        // #1.4 Sort Matrix Ct by ROW.
        before = pBufferSizeInBytes;
        checkCudaErrors(cusparseXcoosort_bufferSizeExt(sparseHandle, H_NBUS, H_NBRANCH,
                nnzCtcoo, cooRowCt, cooColCt, &pBufferSizeInBytes));
        if (pBufferSizeInBytes > before) {
            checkCudaErrors(cudaMalloc((void**) &pBuffer, pBufferSizeInBytes * sizeof(char)));
        }
        checkCudaErrors(cusparseCreateIdentityPermutation(sparseHandle, nnzCtcoo, permutation));
        checkCudaErrors(cusparseXcoosortByRow(sparseHandle, H_NBUS, H_NBRANCH, nnzCtcoo,
                cooRowCt, cooColCt, permutation, pBuffer));
        checkCudaErrors(cusparseZgthr(sparseHandle, nnzCtcoo, cooValCt, csrValCt,
                permutation, CUSPARSE_INDEX_BASE_ZERO));

        // #1.5 Convert Matrix Ct from COO to CSR.
        checkCudaErrors(cusparseXcoo2csr(sparseHandle, (const int*) cooRowCt, nnzCt, H_NBUS,
                csrRowPtrCt, CUSPARSE_INDEX_BASE_ZERO));
        checkCudaErrors(cudaMemcpy(csrColIndCt, cooColCt, nnzCt * sizeof(int),
                cudaMemcpyDeviceToDevice));
    }

    // #2 Compute Matrix Yf and Yt (values stored in per-test slices).
    hybrid_computeYfYt<<<BLOCKS(H_NBRANCH, H_THREADS), H_THREADS, 0, stream[nTest]>>>(
            buses, branches,
            csrValYt + nnzYt * nTest, csrRowPtrYt, csrColIndYt,
            csrValYf + nnzYf * nTest, csrRowPtrYf, csrColIndYf,
            csrValYsh + nnzYsh * nTest, csrRowPtrYsh, csrColIndYsh,
            d_estrutura, d_enxame + nTest * sizeEstrutura);

    // #3 Compute Admittance Matrix(Ybus): Ybus = Cf * Yf + Ct * Yt + Ysh.
    // The sparsity pattern is identical for every test, so the symbolic
    // phase (Nnz + allocation) runs only on the first test.

    // #3.1 Compute Cf * Yf.
    if (nTest == 0) {
        checkCudaErrors(cusparseCreateMatDescr(&descrCf));
        checkCudaErrors(cusparseCreateMatDescr(&descrYf));
        checkCudaErrors(cusparseCreateMatDescr(&descrCfYf));
        checkCudaErrors(cusparseSetMatType(descrCf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrCfYf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgemmNnz(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCf, nnzCf, csrRowPtrCf, csrColIndCf,
                descrYf, nnzYf, csrRowPtrYf, csrColIndYf,
                descrCfYf, csrRowPtrCfYf, &nnzCfYf));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCfYf, sizeof(int) * nnzCfYf));
        checkCudaErrors(cudaMalloc((void**)&csrValCfYf, sizeof(cuDoubleComplex) * nnzCfYf));
        checkCudaErrors(cusparseZcsrgemm(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCf, nnzCf, csrValCf, csrRowPtrCf, csrColIndCf,
                descrYf, nnzYf, csrValYf, csrRowPtrYf, csrColIndYf,
                descrCfYf, csrValCfYf, csrRowPtrCfYf, csrColIndCfYf));
    } else {
        // NOTE(review): descrCfYf is passed where the Yf descriptor is
        // expected; harmless while all descriptors are GENERAL, but this
        // probably meant descrYf.
        checkCudaErrors(cusparseZcsrgemm(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCf, nnzCf, csrValCf, csrRowPtrCf, csrColIndCf,
                descrCfYf, nnzYf, csrValYf + nnzYf * nTest, csrRowPtrYf, csrColIndYf,
                descrCfYf, csrValCfYf, csrRowPtrCfYf, csrColIndCfYf));
    }

    // #3.2 Compute Ct * Yt.
    if (nTest == 0) {
        checkCudaErrors(cusparseCreateMatDescr(&descrCt));
        checkCudaErrors(cusparseCreateMatDescr(&descrYt));
        checkCudaErrors(cusparseCreateMatDescr(&descrCtYt));
        checkCudaErrors(cusparseSetMatType(descrCt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrCtYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgemmNnz(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCt, nnzCt, csrRowPtrCt, csrColIndCt,
                descrYt, nnzYt, csrRowPtrYt, csrColIndYt,
                descrCtYt, csrRowPtrCtYt, &nnzCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCtYt, sizeof(int) * nnzCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrValCtYt, sizeof(cuDoubleComplex) * nnzCtYt));
        checkCudaErrors(cusparseZcsrgemm(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCt, nnzCt, csrValCt, csrRowPtrCt, csrColIndCt,
                descrYt, nnzYt, csrValYt, csrRowPtrYt, csrColIndYt,
                descrCtYt, csrValCtYt, csrRowPtrCtYt, csrColIndCtYt));
    } else {
        checkCudaErrors(cusparseZcsrgemm(sparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
                H_NBUS, H_NBUS, H_NBRANCH,
                descrCt, nnzCt, csrValCt, csrRowPtrCt, csrColIndCt,
                descrCtYt, nnzYt, csrValYt + nnzYt * nTest, csrRowPtrYt, csrColIndYt,
                descrCtYt, csrValCtYt, csrRowPtrCtYt, csrColIndCtYt));
    }

    // #3.3 Compute CfYf + CtYt.
    if (nTest == 0) {
        checkCudaErrors(cusparseCreateMatDescr(&descrCfYfCtYt));
        checkCudaErrors(cusparseSetMatType(descrCfYfCtYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgeamNnz(sparseHandle, H_NBUS, H_NBUS,
                descrCfYf, nnzCfYf, csrRowPtrCfYf, csrColIndCfYf,
                descrCtYt, nnzCtYt, csrRowPtrCtYt, csrColIndCtYt,
                descrCfYfCtYt, csrRowPtrCfYfCtYt, &nnzCfYfCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCfYfCtYt, sizeof(int) * nnzCfYfCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrValCfYfCtYt, sizeof(cuDoubleComplex) * nnzCfYfCtYt));
        cuDoubleComplex fator = make_cuDoubleComplex(1, 0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS,
                &fator, descrCfYf, nnzCfYf,
                (const cuDoubleComplex*) csrValCfYf, csrRowPtrCfYf, csrColIndCfYf,
                &fator, descrCtYt, nnzCtYt,
                (const cuDoubleComplex*) csrValCtYt, csrRowPtrCtYt, csrColIndCtYt,
                descrCfYfCtYt, csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt));
    } else {
        cuDoubleComplex fator = make_cuDoubleComplex(1, 0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS,
                &fator, descrCfYf, nnzCfYf,
                (const cuDoubleComplex*) csrValCfYf, csrRowPtrCfYf, csrColIndCfYf,
                &fator, descrCtYt, nnzCtYt,
                (const cuDoubleComplex*) csrValCtYt, csrRowPtrCtYt, csrColIndCtYt,
                descrCfYfCtYt, csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt));
    }

    // #3.4 Compute CfYfCtYt + Ysh -> Ybus (values stored per test slice).
    if (nTest == 0) {
        checkCudaErrors(cusparseCreateMatDescr(&descrYsh));
        checkCudaErrors(cusparseCreateMatDescr(&descrYbus));
        checkCudaErrors(cusparseSetMatType(descrYsh, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYbus, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgeamNnz(sparseHandle, H_NBUS, H_NBUS,
                descrCfYfCtYt, nnzCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt,
                descrYsh, nnzYsh, csrRowPtrYsh, csrColIndYsh,
                descrYbus, csrRowPtrYbus, &nnzYbus));
        checkCudaErrors(cudaMalloc((void**)&csrColIndYbus, sizeof(int) * nnzYbus));
        checkCudaErrors(cudaMalloc((void**)&csrValYbus, sizeof(cuDoubleComplex) * nnzYbus * H_NTESTS));
        cuDoubleComplex fator = make_cuDoubleComplex(1, 0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS,
                &fator, descrCfYfCtYt, nnzCfYfCtYt,
                (const cuDoubleComplex*) csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt,
                &fator, descrYsh, nnzYsh,
                (const cuDoubleComplex*) csrValYsh, csrRowPtrYsh, csrColIndYsh,
                descrYbus, csrValYbus, csrRowPtrYbus, csrColIndYbus));
    } else {
        cuDoubleComplex fator = make_cuDoubleComplex(1, 0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS,
                &fator, descrCfYfCtYt, nnzCfYfCtYt,
                (const cuDoubleComplex*) csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt,
                &fator, descrYsh, nnzYsh,
                (const cuDoubleComplex*) (csrValYsh + nnzYsh * nTest), csrRowPtrYsh, csrColIndYsh,
                descrYbus, csrValYbus + nnzYbus * nTest, csrRowPtrYbus, csrColIndYbus));
    }
}

#endif /* MAKEYBUS_CUH_ */
ex4-matrix-product-openmp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define VALIDATE 0
#if VALIDATE
#include "validate.h"
#endif

/* C = A*B for n-by-n row-major int matrices; C must be zero-initialized. */
void mat_prod(const size_t n, const int * restrict, const int * restrict, int * restrict);
void usage(char**);

int main(int argc, char **argv)
{
    int *A, *B, *C;
    size_t i, n;
    double t0, t1;

    /* Parse the matrix dimension from the command line. */
    if (argc == 2) {
        if (sscanf(argv[1], "%zu", &n) != 1) {  /* bug fix: reject non-numeric input */
            usage(argv);
            return 1;
        }
    } else {
        usage(argv);
        return 1;
    }

    A = (int*)malloc(n * n * sizeof(int));
    B = (int*)malloc(n * n * sizeof(int));
    C = (int*)calloc(n * n, sizeof(int));   /* product accumulates into zeroed C */
    if (!A || !B || !C) {                   /* bug fix: allocations were unchecked */
        fprintf(stderr, "Out of memory.\n");
        free(A); free(B); free(C);
        return 1;
    }

    for (i = 0; i < n * n; ++i)
        A[i] = B[i] = i % 100;

    t0 = omp_get_wtime();
    mat_prod(n, A, B, C);
    t1 = omp_get_wtime();

#if VALIDATE
    if (!validate_mat_prod(n, A, B, C)) {
        printf("Validation failed.\n");
        return 1;
    }
#endif

    printf("Total time taken: %f.\n", t1 - t0);

    free(A);
    free(B);
    free(C);
    return 0;
}

/* ikj loop order keeps the innermost loop streaming through rows of B and C. */
void mat_prod(const size_t n, const int * restrict A, const int * restrict B,
              int * restrict C)
{
    size_t i, j, k;
    /* bug fix: with default(none), `n` needs an explicit data-sharing
     * attribute -- const variables are no longer predetermined shared since
     * OpenMP 4.0, so newer compilers rejected the original directive. */
#pragma omp parallel for default(none) shared(A, B, C) firstprivate(n) private(i, j, k)
    for (i = 0; i < n; ++i)
        for (k = 0; k < n; ++k)
            for (j = 0; j < n; ++j)
                C[i*n + j] += A[i*n + k] * B[k*n + j];
}

void usage(char **argv)
{
    printf("Usage: %s <length>\n", argv[0]);
}
test.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>

/*
 * Spawns 4 OpenMP threads: thread 0 ("master") spins briefly and exits;
 * each other thread forks a child that execs ./test2 and waits for it.
 */
int main() {
    omp_set_dynamic(0);                 /* force exactly 4 threads below */
#pragma omp parallel num_threads(4)
    {
        int currentThread = omp_get_thread_num();
        if (currentThread == 0) {
            printf("Master!\n");
            /* crude delay loop; volatile stops the compiler from deleting it */
            volatile int i = 0;
            while (i < 100000) {
                i++;
            }
            printf("Master Completed!\n");
        } else {
            printf("Slave!\n");
            pid_t pid = fork();
            if (pid == 0) {
                /* child: exec ./test2; execvp only returns on failure */
                char *args[2];
                args[0] = "test2";
                args[1] = NULL;
                if (execvp("./test2", args) == -1) {
                    fprintf(stderr, "ERROR: failed to run execvp\n");
                }
                _exit(EXIT_FAILURE); /* _exit: avoid flushing parent's stdio buffers twice */
            } else if (pid < 0) {
                /* bug fix: a fork failure used to fall into the wait branch,
                 * where waitpid(-1, ...) waits for ANY child of the process */
                fprintf(stderr, "ERROR: fork failed\n");
            } else {
                /* bug fix: the original polled waitpid(WNOHANG) in a tight
                 * loop, burning a full core; a blocking wait is equivalent */
                int status;
                pid_t id;
                do {
                    id = waitpid(pid, &status, WUNTRACED);
                } while (id == -1 && errno == EINTR);
                if (id == -1) {
                    printf("Exiting...\n");
                    exit(-1);
                } else if (WIFEXITED(status)) {
                    printf("Terminated normally\n");
                } else if (WIFSIGNALED(status)) {
                    printf("Unexpected error\n");
                    exit(-2);
                }
            }
        }
    }
    return 0;
}
GB_binop__pair_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__pair_fc64
// A.*B function (eWiseMult):       GB_AemultB__pair_fc64
// A*D function (colscale):         GB_AxD__pair_fc64
// D*A function (rowscale):         GB_DxB__pair_fc64
// C+=B function (dense accum):     GB_Cdense_accumB__pair_fc64
// C+=b function (dense accum):     GB_Cdense_accumb__pair_fc64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__pair_fc64
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GxB_CMPLX(1,0)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: the PAIR operator ignores the values of its operands)
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
// (empty: the PAIR operator ignores the values of its operands)
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces the complex constant 1
#define GB_BINOP(z, x, y) \
    z = GxB_CMPLX(1,0) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FC64 || GxB_NO_PAIR_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (not generated for the PAIR operator)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pair_fc64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pair_fc64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

// (not generated for the PAIR operator)

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ; ;
        Cx [p] = GxB_CMPLX(1,0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):
apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Disabled for PAIR: bind2nd of PAIR is not generated (the op ignores y).
#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ;
        ;
        Cx [p] = GxB_CMPLX(1,0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = GxB_CMPLX(1,0) ; \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generated code redefines it to the same type)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = GxB_CMPLX(1,0) ; \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
/* ===== GB_unop__identity_int8_uint16.c ===== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int8_uint16
// op(A') function:  GB_unop_tran__identity_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise typecast copy: Cx [p] = (int8_t) Ax [p] for p in [0, anz).
GrB_Info GB_unop_apply__identity_int8_uint16
(
    int8_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        int8_t z = (int8_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is supplied by the template, specialized through the
// GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== b4soild.c ===== */
/*** B4SOI 12/16/2010 Released by Tanvir Morshed ***/

/**********
 * Copyright 2010 Regents of the University of California. All rights reserved.
 * Authors: 1998 Samuel Fung, Dennis Sinitsky and Stephen Tang
 * Authors: 1999-2004 Pin Su, Hui Wan, Wei Jin, b3soild.c
 * Authors: 2005- Hui Wan, Xuemei Xi, Ali Niknejad, Chenming Hu.
 * Authors: 2009- Wenwei Yang, Chung-Hsun Lin, Ali Niknejad, Chenming Hu.
 * File: b4soild.c
 * Modified by Hui Wan, Xuemei Xi 11/30/2005
 * Modified by Wenwei Yang, Chung-Hsun Lin, Darsen Lu 03/06/2009
 * Modified by Tanvir Morshed 09/22/2009
 * Modified by Tanvir Morshed 12/31/2009
 * Modified by Larry Wagner, Calvin Bittner, Geoffrey Coram, Tanvir Morshed 05/14/2010
 * Modified by Larry Wagner, Calvin Bittner, 5 fixes. 08/04/2010
 * Modified by Larry Wagner, Calvin Bittner, FD derivatives fixes. 08/25/2010
 **********/

#include "ngspice/ngspice.h"
#include "ngspice/cktdefs.h"
#include "b4soidef.h"
#include "ngspice/trandefs.h"
#include "ngspice/const.h"
#include "ngspice/sperror.h"
#include "ngspice/devdefs.h"
#include "ngspice/suffix.h"

/* physical constants (SI units) */
#define EPS0 8.85418e-12        /*4.1*/
#define EPSOX 3.453133e-11
#define EPSSI 1.03594e-10
#define Charge_q 1.60219e-19
#define KboQ 8.617087e-5        /* Kb / q */
#define Eg300 1.115             /* energy gap at 300K */

/* smoothing / convergence constants of the B4SOI model equations */
#define DELTA 1.0E-9            /* v4.0 */
#define DELTA_1 0.02
#define DELTA_2 0.02
#define DELTA_3 0.02
/* Original is 0.02, for matching IBM model, change to 0.08 */
#define DELTA_3_SOI 0.08
#define DELTA_4 0.02
#define DELT_Vbseff 0.005
#define DELTA_VFB 0.02
#define OFF_Vbsitf 0.02         /* v3.1*/
#define CONST_2OV3 0.6666666666

/* exp() clamping range: exp(+/-EXPL_THRESHOLD) */
#define MAX_EXPL 2.688117142e+43
#define MIN_EXPL 3.720075976e-44
#define EXPL_THRESHOLD 100.0

/* DEXP(A,B,C): B = exp(A) clamped against overflow/underflow, C = dB/dA.
 * Above EXPL_THRESHOLD the exponential is linearized so B stays finite
 * while keeping a usable derivative for Newton iteration.
 * NOTE(review): A is evaluated more than once -- do not pass expressions
 * with side effects. */
#define DEXP(A,B,C) { \
        if (A > EXPL_THRESHOLD) { \
            B = MAX_EXPL*(1.0+(A)-EXPL_THRESHOLD); \
            C = MAX_EXPL; \
        } else if (A < -EXPL_THRESHOLD) { \
            B = MIN_EXPL; \
            C = 0; \
        } else { \
            B = exp(A); \
            C = B; \
        } \
    }

/* FLOG(A): strictly positive magnitude, safe as a log() argument.
 * Bug fix: the expansion is now fully parenthesized.  The previous
 * definition `fabs(A) + 1e-14` changed meaning when the macro was used
 * inside a larger expression (e.g. `1.0/FLOG(x)` expanded to
 * `1.0/fabs(x) + 1e-14`). */
#define FLOG(A) (fabs(A) + 1e-14)

#ifdef USE_OMP
int B4SOILoadOMP(B4SOIinstance *here, CKTcircuit *ckt);
void
B4SOILoadRhsMat(GENmodel *inModel, CKTcircuit *ckt);
#endif

/* B4SOIlimit(vnew,vold)
 * limits the per-iteration change of any absolute voltage value
 *
 * Returns vnew clamped so that |vnew - vold| <= limit; if either input is
 * NaN, the new prediction is reset to 0.0.  *check is set to 1 whenever the
 * value was altered (NaN recovery or clamping) and left untouched otherwise,
 * so the caller can accumulate a "limited" flag over several calls.
 */
static double B4SOIlimit(
    double vnew,
    double vold,
    double limit,
    int *check)
{
    double T0, T1;

    /* NaN guard: recover by restarting the prediction at 0.0 */
    if (isnan (vnew) || isnan (vold))
    {
        fprintf(stderr, "Alberto says: YOU TURKEY! The limiting function received NaN.\n");
        fprintf(stderr, "New prediction returns to 0.0!\n");
        vnew = 0.0;
        *check = 1;
    }

    T0 = vnew - vold;   /* signed step from the previous iterate */
    T1 = fabs(T0);
    if (T1 > limit)
    {
        /* clamp the step to +/- limit around the old value */
        if (T0 > 0.0)
            vnew = vold + limit;
        else
            vnew = vold - limit;
        *check = 1;
    }
    return vnew;
}

/* Top-level device load: stamps every B4SOI instance into the circuit
 * matrix/RHS for the current Newton iteration.  (Definition continues
 * beyond this chunk.) */
int B4SOIload(
    GENmodel *inModel,
    CKTcircuit *ckt)
{
#ifdef USE_OMP
    int idx;
    B4SOImodel *model = (B4SOImodel*)inModel;
    int error = 0;
    B4SOIinstance **InstArray;
    InstArray = model->B4SOIInstanceArray;

    /* Instances are evaluated in parallel; matrix/RHS stamping is done
     * serially afterwards in B4SOILoadRhsMat.
     * NOTE(review): `error` is written unsynchronized from multiple
     * threads -- formally a data race, though only the "some error
     * occurred" information is used; confirm intent. */
    #pragma omp parallel for
    for (idx = 0; idx < model->B4SOIInstCount; idx++) {
        B4SOIinstance *here = InstArray[idx];
        int local_error = B4SOILoadOMP(here, ckt);
        if (local_error)
            error = local_error;
    }

    B4SOILoadRhsMat(inModel, ckt);

    return error;
}

/* Per-instance evaluation used by the OMP path; in the non-OMP build the
 * same body belongs to B4SOIload itself (see #else below). */
int B4SOILoadOMP(B4SOIinstance *here, CKTcircuit *ckt)
{
    B4SOImodel *model = B4SOImodPtr(here);
#else
    register B4SOImodel *model = (B4SOImodel*)inModel;
    register B4SOIinstance *here;
#endif

    register int selfheat;
    double Gmin;
    double dVgstNVt_dT, dVgstNVt2_dT;                                /* LFW_FD */
    double ag0, qgd, qgs, von, cbhat, VgstNVt, ExpVgst, dExpVgst_dT; /* enhanced line Wagner */
    /* LFW_FD next 4 lines */
    double dVgstNVt_dVg, dVgstNVt_dVd, dVgstNVt_dVb, dVgstNVt_dVe;
    double dExpVgst_dVg, dExpVgst_dVd, dExpVgst_dVb, dExpVgst_dVe, dVgstNVt2_dVg;
    double dVgstNVt2_dVd, dVgstNVt2_dVb, dVgstNVt2_dVe, dExpArg2_dVd, dExpArg2_dVb, dExpArg2_dVe;
    double dExpVgst2_dVg, dExpVgst2_dVd, dExpVgst2_dVb, dExpVgst2_dVe;
    double cdhat, cdreq, ceqbd, ceqbs, ceqqb, ceqqd, ceqqg, ceq, geq;
    double arg;
    double delvbd, delvbs, delvds, delvgd, delvgs;
    double Vfbeff, dVfbeff_dVg, dVfbeff_dVd, dVfbeff_dVe, dVfbeff_dVb, V3, V4;
    double PhiBSWG, MJSWG;
    double gcgdb, gcggb, gcgsb, gcgeb, gcgT;
    double
gcsdb, gcsgb, gcssb, gcseb, gcsT; double gcddb, gcdgb, gcdsb, gcdeb, gcdT; double gcbdb, gcbgb, gcbsb, gcbeb, gcbT; double gcedb, gcegb, gcesb, gceeb, gceT; double gcTt, gTtg, gTtb, gTtdp, gTtt, gTtsp; double vbd, vbs, vds, vgb, vgd, vgs, vgdo; #ifndef PREDICTOR double xfact; #endif double vg, vd, vs, vp, ve, vb; double Vds, Vgs, Vbs, Gmbs, FwdSum, RevSum; double Vgs_eff, Vfb, dVfb_dVb, dVfb_dVd, dVfb_dVg, dVfb_dVe, dVfb_dT; double Phis, sqrtPhis, dsqrtPhis_dVd, dsqrtPhis_dVe, dsqrtPhis_dVb, dsqrtPhis_dVg; double Vth, dVth_dVb, dVth_dVd, dVth_dVg, dVth_dVe, dVth_dT; double Vgst, dVgst_dVg, dVgst_dVd, dVgst_dVb, dVgst_dVe, dVgst_dT, dVgs_eff_dVg, dVgs_eff_dT; double n, dn_dVb, dn_dVe, dn_dVg, Vtm; double ExpArg, dExpArg_dVg, dExpArg_dVd, dExpArg_dVb, dExpArg_dVe, dExpArg_dT, dExpArg2_dVg, dExpArg2_dT, V0; double ueff, dueff_dVg, dueff_dVd, dueff_dVb, dueff_dVe, dueff_dT; double Esat, Vdsat; double EsatL, dEsatL_dVg, dEsatL_dVd, dEsatL_dVb, dEsatL_dVe, dEsatL_dT; double dVdsat_dVg, dVdsat_dVb, dVdsat_dVd, dVdsat_dVe, dVdsat_dT, Vasat; double dVasat_dVg, dVasat_dVb, dVasat_dVd, dVasat_dVe, dVasat_dT; double Va, dVa_dVd, dVa_dVg, dVa_dVb, dVa_dVe, dVa_dT; double Vbseff, dVbseff_dVb; double CoxWL; double T0=0.0, dT0_dVg, dT0_dVd, dT0_dVe, dT0_dVb, dT0_dT; double T1, dT1_dVg, dT1_dVd, dT1_dVb, dT1_dT; double T2, dT2_dVg, dT2_dVd, dT2_dVb, dT2_dT, dT2_dVp; double T3, dT3_dVg, dT3_dVd, dT3_dVe, dT3_dVb, dT3_dT; double T4, dT4_dVe, dT4_dVg, dT4_dVd, dT4_dVb, dT4_dT; double T5, dT5_dVg, dT5_dVd, dT5_dVb, dT5_dT; double T6, dT6_dVg, dT6_dVd, dT6_dVe, dT6_dVb, dT6_dT, dT6_dVp; double T7, dT7_dVg, dT7_dVb, dT7_dVd, dT7_dVe; double T8, dT8_dVd; double T9, dT9_dVd; double T10, dT10_dVb, dT10_dVd; double T11, T12; /* LFW_FD 2 new lines */ double dT02_dVg, dT02_dVd, dT02_dVb, dT02_dVe, dT12_dVg, dT12_dVd, dT12_dVb, dT12_dVe; double dT22_dVg, dT22_dVd, dT22_dVb, dT22_dVe; double dTL0_dT, TL1, dTL1_dT, TL2, dTL2_dT, TL3, dTL3_dT, TL4, dTL4_dT, dTL5_dT; /* new line Wagner */ /* LFW_FD 
1 new line */ double dTL1_dVg, dTL1_dVd, dTL1_dVb, dTL1_dVe; double dTL6_dT, dTL7_dT, dTL8_dT, dTL9_dT; /* new line Wagner */ double tmp, Abulk, dAbulk_dVb, Abulk0, dAbulk0_dVg, dAbulk0_dVb, dAbulk0_dVd, dAbulk0_dVe; double dAbulk_dT, dAbulk0_dT, dAbulkCV_dT; /* new line Wagner */ double VACLM, dVACLM_dVg, dVACLM_dVd, dVACLM_dVb, dVACLM_dVe, dVACLM_dT; double VADIBL, dVADIBL_dVg, dVADIBL_dVd, dVADIBL_dVb, dVADIBL_dVe, dVADIBL_dT; double Xdep, dXdep_dVd, dXdep_dVe, dXdep_dVb, dXdep_dVg; double lt1, dlt1_dVd, dlt1_dVe, dlt1_dVb, dlt1_dVg; double ltw, dltw_dVd, dltw_dVe, dltw_dVb, dltw_dVg; double Delt_vth, dDelt_vth_dVd, dDelt_vth_dVe, dDelt_vth_dVb, dDelt_vth_dVg, dDelt_vth_dT; double Theta0, dTheta0_dVd, dTheta0_dVe, dTheta0_dVb, dTheta0_dVg; double TempRatio, tmp1, tmp2, tmp3, tmp4; double DIBL_Sft, dDIBL_Sft_dVd, Lambda, dLambda_dVg, dLambda_dVb, dLambda_dVd, dLambda_dVe; double dLambda_dT; /* new line Wagner */ double a1; double Vgsteff, dVgsteff_dVg, dVgsteff_dVd, dVgsteff_dVb, dVgsteff_dT; double Vdseff, dVdseff_dVg, dVdseff_dVd, dVdseff_dVb, dVdseff_dVe, dVdseff_dT; double VdseffCV, dVdseffCV_dVg, dVdseffCV_dVd, dVdseffCV_dVb, dVdseffCV_dVe; double dVdseffCV_dT; /* new line Wagner */ double diffVds; double dAbulk_dVg, dAbulk_dVd, dAbulk_dVe, dn_dVd ; double beta, dbeta_dVg, dbeta_dVd, dbeta_dVb, dbeta_dVe, dbeta_dT; double gche, dgche_dVg, dgche_dVd, dgche_dVb, dgche_dVe, dgche_dT; double fgche1, dfgche1_dVg, dfgche1_dVd, dfgche1_dVb, dfgche1_dVe, dfgche1_dT; double fgche2, dfgche2_dVg, dfgche2_dVd, dfgche2_dVb, dfgche2_dVe, dfgche2_dT; double Idl, dIdl_dVg, dIdl_dVd, dIdl_dVb, dIdl_dVe, dIdl_dT; double Ids, Gm, Gds, Gmb, dIds_dVg, dIds_dVb, dIds_dVd, dIds_dVe, dIds_dT; double CoxWovL; double Rds, dRds_dVg, dRds_dVb, dRds_dVd, dRds_dVe, dRds_dT, WVCox, WVCoxRds, dWVCoxRds_dT; double dWVCoxRds_dVg, dWVCoxRds_dVb, dWVCoxRds_dVd, dWVCoxRds_dVe; double Vgst2Vtm, dVgst2Vtm_dT, VdsatCV, dVdsatCV_dVg, dVdsatCV_dVd, dVdsatCV_dVb, dVdsatCV_dVe, dVdsatCV_dT; double 
Leff, Weff, dWeff_dVg, dWeff_dVb, dWeff_dVd, dWeff_dVe, dWeff_dT; double AbulkCV, dAbulkCV_dVg, dAbulkCV_dVb, dAbulkCV_dVd, dAbulkCV_dVe; double qgdo, qgso, cgdo, cgso; double dxpart, sxpart; struct b4soiSizeDependParam *pParam; int ByPass, Check, ChargeComputationNeeded=0, error; double gbbsp, gbbdp, gbbg, gbbb, gbbp, gbbT; double gddpsp, gddpdp, gddpg, gddpb, gddpT; double gsspsp, gsspdp, gsspg, gsspb, gsspT; double Gbpbs=0.0, Gbpps; double ves, ved, veb, vge=0.0, delves, vedo, delved; double vps, vpd, Vps, delvps; double Vbd, Ves, Vesfb; double DeltVthtemp, dDeltVthtemp_dVd, dDeltVthtemp_dVe, dDeltVthtemp_dVb, dDeltVthtemp_dVg, dDeltVthtemp_dT; double Vbp, dVbp_dVb; double DeltVthw, dDeltVthw_dVd, dDeltVthw_dVe, dDeltVthw_dVb, dDeltVthw_dVg, dDeltVthw_dT; double Gm0, Gds0, Gmb0, Gme0, GmT0, GmT; double dDIBL_Sft_dVg, dDIBL_Sft_dVe, dDIBL_Sft_dVb; double Igidl, Ggidld=0.0, Ggidlg, Ggidlb=0.0, Ggidlt; /* enhanced line Wagner */ double Igisl, Ggisls=0.0, Ggislg, Ggislb=0.0, Ggislt; /* enhanced line Wagner */ double Gjsd, Gjsb=0.0, GjsT, Gjdd, Gjdb=0.0, GjdT; double Ibp, Iii, Giid=0.0, Giig, Giib=0.0, GiiT, Gcd, Gcb, GcT, ceqbody, ceqbodcon; double gppb, gppp; double delTemp, deldelTemp, Temp; double ceqth, ceqqth; double K1; double qjs=0.0, gcjsbs=0.0, gcjsT; double qjd=0.0, gcjdbs=0.0, gcjdT; double qge; double ceqqe; double ni, Eg, Cbox, CboxWL; double dEg_dT; /* new line Wagner */ double cjsbs; double dVfbeff_dVrg; double qinv, qgate=0.0, qbody=0.0, qdrn=0.0, qsrc, qsub=0.0, cqgate, cqbody, cqdrn, cqsub, cqtemp; double qgate1; /* new line Wagner */ double Cgg, Cgd, Cgb, Cge; double Csg, Csd, Csb, Cse, Cbg, Cbd, Cbb, Cbe; double Cgg1, Cgb1, Cgd1, Cge1, Cbg1, Cbb1, Cbd1, Cbe1, Csg1, Csd1, Csb1, Cse1; double Vdsatii; double Ibs1 ,dIbs1_dVb ,dIbs1_dT; double Ibs2 ,dIbs2_dVb ,dIbs2_dT; double Ibs3 ,dIbs3_dVb ,dIbs3_dVd, dIbs3_dT; double Ibs4 ,dIbs4_dVb ,dIbs4_dT; double Ibd1 ,dIbd1_dVb ,dIbd1_dVd ,dIbd1_dT; double Ibd2 ,dIbd2_dVb ,dIbd2_dVd ,dIbd2_dT; double Ibd3 
,dIbd3_dVb ,dIbd3_dVd ,dIbd3_dT; double Ibd4 ,dIbd4_dVb ,dIbd4_dVd ,dIbd4_dT; double WTsi, NVtm1, NVtm2; double Ic ,dIc_dVb ,dIc_dVd; double Ibs; double Ibd; double Denomi ,dDenomi_dVg, dDenomi_dVd, dDenomi_dVb, dDenomi_dVe, dDenomi_dT; double Qsub0 ,dQsub0_dVg ,dQsub0_dVb ,dQsub0_dVd, dQsub0_dVe ; double dqgate_dT, dqgate2_dT, dqbulk_dT, dqsrc_dT, dqdrn_dT, dqbody_dT, dqsub_dT; /* new line Wagner */ double Qac0 ,dQac0_dVb ,dQac0_dVd; double Qe1 , dQe1_dVb, dQe1_dVe, dQe1_dT; double Ce1b ,Ce1e, Ce1T; double dQac0_dVrg, dQsub0_dVrg; /* for self-heating */ double vbi, vfbb, phi, sqrtPhi, Xdep0, u0temp, vsattemp; double jdifs, jdifd, djdifs_dT, djdifd_dT; double jbjts, jbjtd, djbjts_dT, djbjtd_dT; double jrecs, jrecd, djrecs_dT, djrecd_dT; double jtuns, jtund, djtuns_dT, djtund_dT; double rds0=0.0, ua, ub, uc; double dvbi_dT, dvfbb_dT, du0temp_dT; double dvsattemp_dT, drds0_dT=0.0, dua_dT, dub_dT, duc_dT, dni_dT, dVtm_dT; double dVfbeff_dT, dQac0_dT, dQsub0_dT; double CbT, CsT, CgT; double CdT; /* new line Wagner */ double rho, rho_ref, ku0temp; /* v4.0 */ double drho_dT, drho_ref_dT, dku0temp_dT; /* v4.0 */ /* v2.0 release */ double Vbsh, dVbsh_dVb; double sqrtPhisExt, dsqrtPhisExt_dVd, dsqrtPhisExt_dVe, dsqrtPhisExt_dVb, dsqrtPhisExt_dVg; double T13, T14; double dT14_dT; /* new line Wagner */ double dT11_dVb, dT11_dVd, dT11_dVe, dT13_dVb, dT13_dVd, dT13_dVe, dT14_dVb, dT14_dVd, dT14_dVe, dT13_dVg, dT14_dVg; double Vdsatii0, dVdsatii0_dT; double VgsStep, dVgsStep_dT, Ratio, dRatio_dVg, dRatio_dVb, dRatio_dVd, dRatio_dT, dTempRatio_dT; double Vdiff, dVdiff_dVg, dVdiff_dVb, dVdiff_dVd, dVdiff_dT; double dNVtm1_dT; double NVtmf, NVtmr, dNVtmf_dT, dNVtmr_dT; double TempRatioMinus1; double Ahlis, dAhlis_dT, Ahlid, dAhlid_dT ; double WsTsi, WdTsi; double dPhiBSWG_dT, dcjsbs_dT, darg_dT, ddT3_dVb_dT; double dT7_dT, dT0_dT7, dT1_dT7, dT2_dT7; double CoxWLb, CoxWLcenb; double ExpVbsNVtm, dExpVbsNVtm_dVb, dExpVbsNVtm_dT; double ExpVbdNVtm, dExpVbdNVtm_dVb, dExpVbdNVtm_dVd, 
dExpVbdNVtm_dT; double Ien, dIen_dT, Iendif, dIendif_dT; double Ibsdif, dIbsdif_dVb, dIbsdif_dT; double Ibddif, dIbddif_dVb, dIbddif_dT; double Ehlis, dEhlis_dVb, dEhlis_dT; double EhlisFactor, dEhlisFactor_dVb, dEhlisFactor_dT; double Ehlid, dEhlid_dVb, dEhlid_dVd, dEhlid_dT; double EhlidFactor, dEhlidFactor_dVb, dEhlidFactor_dT; double E2ndFactor, dE2ndFactor_dVb, dE2ndFactor_dVd, dE2ndFactor_dT; double dT10_dT, dT11_dT, dT13_dT, DioMax; /* LFW_FD enhance line */ double cjdbs, dcjdbs_dT; double wdios, wdiod; /* for capMod3 */ double Cox, Tox, Tcen, dTcen_dVg, dTcen_dVb, LINK, Ccen, Coxeff, dCoxeff_dVg, dCoxeff_dVb; double CoxWLcen, QovCox, dQac0_dVg, dQac0_dVe, DeltaPhi, dDeltaPhi_dVg, dDeltaPhi_dT; double dDeltaPhi_dVd, dDeltaPhi_dVb, dDeltaPhi_dVe; double dTcen_dVd, dTcen_dVe, dTcen_dT, dCoxeff_dVd, dCoxeff_dT, dCoxWLcenb_dT, qinoi, qbulk, qbulk1; double dCoxeff_dVe; double T3zb, lt1zb, ltwzb, Theta0zb; double Delt_vthzb, dDelt_vthzb_dT; double DeltVthwzb, dDeltVthwzb_dT; double DeltVthtempzb, dDeltVthtempzb_dT; double Vthzb, dVthzb_dT, Vfbzb, dVfbzb_dT; /* v3.2 */ double noff, dnoff_dVg, dnoff_dVd, dnoff_dVb, dnoff_dVe; double dnoff_dT; /* new line Wagner */ double vgmb; /* v3.1 added for RF */ double geltd, gcrg, gcrgg, gcrgd, gcrgs, gcrgb, ceqgcrg; double vges, vgms, vgedo, vgmdo, vged, vgmd, delvged, delvgmd; double delvges, delvgms, vgme; double gcgmgmb=0.0, gcgmdb, gcgmsb, gcdgmb, gcsgmb; double gcgmeb, gcegmb, qgme, qgmid=0.0, ceqqgmid; double gcgbb; double vgge, vggm; /* v3.0 */ double Igc, dIgc_dVg, dIgc_dVd, dIgc_dVe, dIgc_dVb, Igs, dIgs_dVg, dIgs_dVs, Igd, dIgd_dVg, dIgd_dVd; double Igcs, dIgcs_dVg, dIgcs_dVd, dIgcs_dVb, dIgcs_dVe, Igcd, dIgcd_dVg, dIgcd_dVd, dIgcd_dVb, dIgcd_dVe; double dIgc_dT, dIgcs_dT, dIgcd_dT; /* new line Wagner */ double vgs_eff, dvgs_eff_dvg, vgd_eff, dvgd_eff_dvg; double VxNVt, ExpVxNVt; double dVxNVt_dT; /* new line Wagner */ double gIstotg, gIstotd, gIstotb, gIstots, Istoteq; double gIdtotg, gIdtotd, gIdtotb, gIdtots, 
Idtoteq; double gIgtotg, gIgtotd, gIgtotb, gIgtots, Igtoteq; /* v3.0 */ double Vbsitf, dVbsitf_dVg, dVbsitf_dVd, dVbsitf_dVb, dVbsitf_dVe, dVbsitf_dT, dVbs_dVb; double dVbs_dVg, dVbs_dVd, dVbs_dVe, dVbs_dT; double dIgb1_dVe, Giie, dRatio_dVe, dVdiff_dVe; double dT1_dVe, dT5_dVe, dIgb_dVe, dVox_dVe, dVoxdepinv_dVe, dVaux_dVe; double Gme, gTte, gbbe, gddpe, gsspe; double Vbs0, dVbs0_dVg, dVbs0_dVd, dVbs0_dVe, dVbs0_dT; double Vbs0mos, dVbs0mos_dVe, dVbs0mos_dT; double Vbsmos, dVbsmos_dVg, dVbsmos_dVd, dVbsmos_dVb, dVbsmos_dVe, dVbsmos_dT; double PhiON, dPhiON_dVg, dPhiON_dVd, dPhiON_dVe, dPhiON_dT; double PhiFD, dPhiFD_dVg, dPhiFD_dVd, dPhiFD_dVe, dPhiFD_dT; double Vbs0t, dVbs0t_dVg, dVbs0t_dVd, dVbs0t_dVe, dVbs0t_dT; double VthFD, dVthFD_dVd, dVthFD_dVb, dVthFD_dVe, dVthFD_dT; double VtgsFD, ExpVtgsFD, VgstFD, ExpVgstFD; double VtgseffFD, dVtgseffFD_dVd, dVtgseffFD_dVg, dVtgseffFD_dVe, dVtgseffFD_dT; double VgsteffFD, dVgsteffFD_dVd, dVgsteffFD_dVg, dVgsteffFD_dVe, dVgsteffFD_dT; double dT2_dVe, dVbsh_dVg, dVbsh_dVd, dVbsh_dVe, dVbsh_dT; double dVgsteff_dVe, dVbseff_dVg, dVbseff_dVd, dVbseff_dVe, dVbseff_dT; /* v2.2 release */ double Vgb, dVgb_dVg, dVgb_dVd, dVgb_dVe, dVgb_dVb, Vox, dVox_dVg, dVox_dVd, dVox_dVb; double OxideRatio, Vaux, dVaux_dVg, dVaux_dVd, dVaux_dVb; double Igb, dIgb_dVg, dIgb_dVd, dIgb_dVb; double ceqgate; double dT0_dVox, Voxeff, dVoxeff_dVox; double dVox_dT, dVaux_dT, dIgb_dT; double Voxacc, dVoxacc_dVg, dVoxacc_dVd, dVoxacc_dVe, dVoxacc_dVb, dVoxacc_dT; double Voxdepinv, dVoxdepinv_dVg, dVoxdepinv_dVb, dVoxdepinv_dVd, dVoxdepinv_dT; double Igb1, dIgb1_dVg, dIgb1_dVd, dIgb1_dVb, dIgb1_dT; double Igb2, dIgb2_dVg, dIgb2_dVd, dIgb2_dVb, dIgb2_dVe, dIgb2_dT; double gigs, gigd, gigb, gigg, gigT, gige; /* LFW_FD enhance line */ double gigpg, gigpp; /* v4.0 */ double IdlovVdseff, dIdlovVdseff_dVg, dIdlovVdseff_dVd, dIdlovVdseff_dVb; double vdbs, vsbs, vdbd=0.0, vsbd, vsbdo, vbs_jct, vbd_jct; double Vsbs, Vdbd, Vdbs; double delvdbd, delvsbs, delvdbs, 
delvbd_jct, delvbs_jct; double gcdbdb, gcsbsb, gcsbb, gcdbb; double ceqqjd=0.0, ceqqjs=0.0; double Lpe_Vb; /* v4.0 for Vth */ double DITS_Sft, DITS_Sft2, dDITS_Sft_dVb, dDITS_Sft_dVd, dDITS_Sft2_dVd, dDITS_Sft_dT; double FP, dFP_dT, dFP_dVg, dFP_dVb, dFP_dVd, dFP_dVe; double VADITS, dVADITS_dVg, dVADITS_dVd, dVADITS_dVb, dVADITS_dVe, dVADITS_dT; /* for DITS */ double Iii_Igidl, Giigidl_b, Giigidl_d, Giigidl_g, Giigidl_e, Giigidl_T; double gjsdb; double Idbdp=0.0, Isbsp=0.0, cdbdp, csbsp, gcjdbdp, gcjsbsp, GGjdb, GGjsb; double vdes, vses, vdedo, delvdes, delvses, delvded, Isestot, cseshat, Idedtot, cdedhat; double PowWeffWr, rd0=0.0, rs0=0.0, rdwmin=0.0, rswmin=0.0, drs0_dT=0.0, drd0_dT=0.0, drswmin_dT=0.0, drdwmin_dT=0.0, Rd, dRd_dVg, dRd_dVb, dRd_dT, Rs, dRs_dVg, dRs_dVb, dRs_dT; double dgstot_dvd, dgstot_dvg, dgstot_dvs, dgstot_dvb, dgstot_dve, dgstot_dT; double dgdtot_dvd, dgdtot_dvg, dgdtot_dvs, dgdtot_dvb, dgdtot_dve, dgdtot_dT; double gstot, gstotd, gstotg, gstots, gstotb, ceqgstot; double gdtot, gdtotd, gdtotg, gdtots, gdtotb, ceqgdtot; double gdpr, gspr; /*4.1*/ double toxe, epsrox, epssub, epsgate; double Tnom, Eg0, Vtm0; double Vbci, Idsmosfet, Iiibjt; double dVbci_dT, dIiibjt_dVd, dIiibjt_dVb, dIiibjt_dT; double VgsteffVth, dT11_dVg; /* v4.1 */ /* Jun 09 */ double toxe_mob ; /* Jun 09 */ double dTheta0_dT, dn_dT, dsqrtPhisExt_dT, dT3zb_dT, dltwzb_dT, dlt1zb_dT, dTheta0zb_dT, dvth0_dT, dDIBL_Sft_dT,dtmp2_dT; /* v4.2 temp deriv */ double Vgd, Vgd_eff, dVgd_eff_dVg, dVgd_eff_dT; /* enhanced line Wagner */ double dVbs0mos_dVd; double Ig_agbcp2, dIg_agbcp2_dVg, dIg_agbcp2_dVp, dIg_agbcp2_dT; double vgp_eff, vgp=0.0, dvgp_eff_dvg, dvgp_eff_dvp, dvgp_eff_dT; /* improved body contact charge model */ double CoxWL2, CoxWLb2; double ExpVgst2, Vgsteff2, VgstNVt2, ExpArg2; double dVgsteff2_dVd, dVgsteff2_dVg, dVgsteff2_dVb, dVgsteff2_dVe, dVgsteff2_dT; double T02; double Qac02, dQac02_dVrg, dQac02_dVd, dQac02_dVg, dQac02_dVb, dQac02_dVe, dQac02_dT; double Vgs_eff2, 
dVgs_eff2_dVg; double Vfbzb2, dVfbzb2_dT; double Vfb2, dVfb2_dVg, dVfb2_dVd, dVfb2_dVb, dVfb2_dVe, dVfb2_dT; double Vfbeff2, dVfbeff2_dVd, dVfbeff2_dVrg, dVfbeff2_dVg, dVfbeff2_dVb, dVfbeff2_dVe, dVfbeff2_dT; double Qsub02, dQsub02_dVg, dQsub02_dVrg, dQsub02_dVd, dQsub02_dVb, dQsub02_dVe, dQsub02_dT; double VdsatCV2, dVdsatCV2_dVg, dVdsatCV2_dVb, dVdsatCV2_dVd, dVdsatCV2_dVe, dVdsatCV2_dT; double VdseffCV2, dVdseffCV2_dVg, dVdseffCV2_dVd, dVdseffCV2_dVb, dVdseffCV2_dVe, dVdseffCV2_dT; double Cbg12, Cbd12, Cbb12, Cbe12; double Cgg12, Cgd12, Cgb12, Cge12; double Csg12, Csd12, Csb12, Cse12; double Tcen2, dTcen2_dVg, dTcen2_dVd, dTcen2_dVb, dTcen2_dVe, dTcen2_dT; double Ccen2; double Coxeff2, dCoxeff2_dVg, dCoxeff2_dVd, dCoxeff2_dVb, dCoxeff2_dVe, dCoxeff2_dT; double CoxWLcenb2, dCoxWLcenb2_dT; double QovCox2; double DeltaPhi2, dDeltaPhi2_dVg, dDeltaPhi2_dVd, dDeltaPhi2_dVb, dDeltaPhi2_dVe; double dDeltaPhi2_dT; /* new line Wagner */ double CoxWLcen2; double T22, T52; double qsrc2, qbulk2; double dqsrc2_dT, dqbulk2_dT; /* new line Wagner */ double Csg2, Csd2, Csb2, Cse2; double DELTA_3_SOI2; double dphi_dT,dsqrtPhi_dT,dXdep0_dT,cdep0,theta0vb0,dtheta0vb0_dT; double thetaRout,dthetaRout_dT,dcdep0_dT; double dPhis_dT,dsqrtPhis_dT,dXdep_dT,dlt1_dT,dltw_dT; double agidl, bgidl, cgidl, egidl, rgidl, kgidl, fgidl; double agisl, bgisl, cgisl, egisl, rgisl, kgisl, fgisl; double ucs, ud; /* Bugfix # 21 Jul09*/ double ndiode, ndioded; /* v4.2 bugfix */ double nrecf0s, nrecf0d, nrecr0s, nrecr0d, vrec0s, vrec0d, ntuns, ntund, vtun0s,vtun0d;/*bugfix for junction DC swapping */ double eggbcp2, eggdep, agb1, bgb1, agb2, bgb2, agbc2n, agbc2p, bgbc2n, bgbc2p, Vtm00; /* v4.3.1 bugfix for mtrlMod=1 -Tanvir */ double m; #ifndef USE_OMP for (; model != NULL; model = B4SOInextModel(model)) { for (here = B4SOIinstances(model); here != NULL; here = B4SOInextInstance(here)) { #endif Check = 0; ByPass = 0; selfheat = (model->B4SOIshMod == 1) && (here->B4SOIrth0 != 0.0); pParam = here->pParam; 
if ((ckt->CKTmode & MODEINITSMSIG)) { vs = *(ckt->CKTrhsOld + here->B4SOIsNodePrime); if (!here->B4SOIvbsusrGiven) { vbs = *(ckt->CKTstate0 + here->B4SOIvbs); vb = *(ckt->CKTrhsOld + here->B4SOIbNode); } else { vbs = here->B4SOIvbsusr; vb = here->B4SOIvbsusr + vs; } vgs = *(ckt->CKTstate0 + here->B4SOIvgs); ves = *(ckt->CKTstate0 + here->B4SOIves); vps = *(ckt->CKTstate0 + here->B4SOIvps); vds = *(ckt->CKTstate0 + here->B4SOIvds); delTemp = *(ckt->CKTstate0 + here->B4SOIdeltemp); /* v4.0 */ vdbs = *(ckt->CKTstate0 + here->B4SOIvdbs); /* v4.0 for rbody */ vdbd = *(ckt->CKTstate0 + here->B4SOIvdbd); /* v4.0 for rbody */ vsbs = *(ckt->CKTstate0 + here->B4SOIvsbs); /* v4.0 for rbody */ vses = *(ckt->CKTstate0 + here->B4SOIvses); /* v4.0 for rdsmod*/ vdes = *(ckt->CKTstate0 + here->B4SOIvdes); /* v4.0 for rdsmod*/ /* v4.0 end */ vg = *(ckt->CKTrhsOld + here->B4SOIgNode); vd = *(ckt->CKTrhsOld + here->B4SOIdNodePrime); vp = *(ckt->CKTrhsOld + here->B4SOIpNode); ve = *(ckt->CKTrhsOld + here->B4SOIeNode); /* v3.1 added for RF */ vgge = *(ckt->CKTrhsOld + here->B4SOIgNodeExt); vggm = *(ckt->CKTrhsOld + here->B4SOIgNodeMid); vges = *(ckt->CKTstate0 + here->B4SOIvges); vgms = *(ckt->CKTstate0 + here->B4SOIvgms); /* v3.1 added for RF end*/ } else if ((ckt->CKTmode & MODEINITTRAN)) { vs = *(ckt->CKTrhsOld + here->B4SOIsNodePrime); if (!here->B4SOIvbsusrGiven) { vbs = *(ckt->CKTstate1 + here->B4SOIvbs); vb = *(ckt->CKTrhsOld + here->B4SOIbNode); } else { vbs = here->B4SOIvbsusr; vb = here->B4SOIvbsusr + vs; } vgs = *(ckt->CKTstate1 + here->B4SOIvgs); ves = *(ckt->CKTstate1 + here->B4SOIves); vps = *(ckt->CKTstate1 + here->B4SOIvps); vds = *(ckt->CKTstate1 + here->B4SOIvds); delTemp = *(ckt->CKTstate1 + here->B4SOIdeltemp); /* v4.0 */ vdbs = *(ckt->CKTstate1 + here->B4SOIvdbs); /* v4.0 for rbody */ vsbs = *(ckt->CKTstate1 + here->B4SOIvsbs); /* v4.0 for rbody */ vses = *(ckt->CKTstate1 + here->B4SOIvses); /* v4.0 for rdsmod */ vdes = *(ckt->CKTstate1 + here->B4SOIvdes); /* v4.0 
for rdsmod */ /* v4.0 end */ vg = *(ckt->CKTrhsOld + here->B4SOIgNode); vd = *(ckt->CKTrhsOld + here->B4SOIdNodePrime); vp = *(ckt->CKTrhsOld + here->B4SOIpNode); ve = *(ckt->CKTrhsOld + here->B4SOIeNode); /* v3.1 added for RF */ vgge = *(ckt->CKTrhsOld + here->B4SOIgNodeExt); vggm = *(ckt->CKTrhsOld + here->B4SOIgNodeMid); vges = *(ckt->CKTstate1 + here->B4SOIvges); vgms = *(ckt->CKTstate1 + here->B4SOIvgms); /* v3.1 added for RF end*/ } else if ((ckt->CKTmode & MODEINITJCT) && !here->B4SOIoff) { vds = model->B4SOItype * here->B4SOIicVDS; vgs = model->B4SOItype * here->B4SOIicVGS; ves = model->B4SOItype * here->B4SOIicVES; vbs = model->B4SOItype * here->B4SOIicVBS; vps = model->B4SOItype * here->B4SOIicVPS; vdbs = vsbs = vbs; /* v4.0 */ vg = vd = vs = vp = ve = 0.0; /* v3.1 added for RF */ vges = vgms = vgs; vgge = vggm =0.0; /* v3.1 added for RF end*/ if (vds > 0.0) /* v4.0 */ { vdes = vds + 0.01; vses = -0.01; } else if (vds < 0.0) { vdes = vds - 0.01; vses = 0.01; } else vdes = vses = 0.0; delTemp = 0.0; here->B4SOIphi = pParam->B4SOIphi; if ((vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) && ((ckt->CKTmode & (MODETRAN | MODEAC|MODEDCOP | MODEDCTRANCURVE)) || (!(ckt->CKTmode & MODEUIC)))) { /* vgs = model->B4SOItype*0.1 + here->B4SOIvth0; */ vgs = model->B4SOItype * here->B4SOIvth0 + 0.1; /* v4.0 */ vds = 0.0; ves = 0.0; vps = 0.0; vges = vgms = vgs; /* v3.1 */ vbs = vdbs = vsbs = 0.0; /* v4.0 */ vdes = 0.01; /* v4.0 for rdsmod */ vses = -0.01; /* v4.0 for rdsmod */ } } else if ((ckt->CKTmode & (MODEINITJCT | MODEINITFIX)) && (here->B4SOIoff)) { delTemp = vps = vbs = vgs = vds = ves = 0.0; vg = vd = vs = vp = ve = 0.0; vgge = vggm = 0.0; /* v3.1 */ vges = vgms =0.0; /* v3.1 */ vdbs = vsbs = vdes = vses = 0.0; /* v4.0 */ } else { #ifndef PREDICTOR if ((ckt->CKTmode & MODEINITPRED)) { xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1]; *(ckt->CKTstate0 + here->B4SOIvbs) = *(ckt->CKTstate1 + here->B4SOIvbs); vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvbs)) - (xfact 
* (*(ckt->CKTstate2 + here->B4SOIvbs))); *(ckt->CKTstate0 + here->B4SOIvgs) = *(ckt->CKTstate1 + here->B4SOIvgs); vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvgs)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvgs))); *(ckt->CKTstate0 + here->B4SOIves) = *(ckt->CKTstate1 + here->B4SOIves); ves = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIves)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIves))); *(ckt->CKTstate0 + here->B4SOIvps) = *(ckt->CKTstate1 + here->B4SOIvps); vps = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvps)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvps))); *(ckt->CKTstate0 + here->B4SOIvds) = *(ckt->CKTstate1 + here->B4SOIvds); vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvds)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvds))); *(ckt->CKTstate0 + here->B4SOIvbd) = *(ckt->CKTstate0 + here->B4SOIvbs) - *(ckt->CKTstate0 + here->B4SOIvds); /* v4.0 */ *(ckt->CKTstate0 + here->B4SOIvdbs) = *(ckt->CKTstate1 + here->B4SOIvdbs); vdbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvdbs)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvdbs))); *(ckt->CKTstate0 + here->B4SOIvdbd) = *(ckt->CKTstate0 + here->B4SOIvdbs) - *(ckt->CKTstate0 + here->B4SOIvds); *(ckt->CKTstate0 + here->B4SOIvsbs) = *(ckt->CKTstate1 + here->B4SOIvsbs); vsbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvsbs)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvsbs))); *(ckt->CKTstate0 + here->B4SOIvses) = *(ckt->CKTstate1 + here->B4SOIvses); vses = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvses)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvses))); *(ckt->CKTstate0 + here->B4SOIvdes) = *(ckt->CKTstate1 + here->B4SOIvdes); vdes = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvdes)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvdes))); /* v4.0 end */ *(ckt->CKTstate0 + here->B4SOIvg) = *(ckt->CKTstate1 + here->B4SOIvg); *(ckt->CKTstate0 + here->B4SOIvd) = *(ckt->CKTstate1 + here->B4SOIvd); *(ckt->CKTstate0 + here->B4SOIvs) = *(ckt->CKTstate1 + here->B4SOIvs); *(ckt->CKTstate0 + 
here->B4SOIvp) = *(ckt->CKTstate1 + here->B4SOIvp); *(ckt->CKTstate0 + here->B4SOIve) = *(ckt->CKTstate1 + here->B4SOIve); /* v3.1 added for RF */ *(ckt->CKTstate0 + here->B4SOIvgge) = *(ckt->CKTstate1 + here->B4SOIvgge); *(ckt->CKTstate0 + here->B4SOIvggm) = *(ckt->CKTstate1 + here->B4SOIvggm); *(ckt->CKTstate0 + here->B4SOIvges) = *(ckt->CKTstate1 + here->B4SOIvges); vges = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvges)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvges))); *(ckt->CKTstate0 + here->B4SOIvgms) = *(ckt->CKTstate1 + here->B4SOIvgms); vgms = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIvgms)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIvgms))); /* v3.1 added for RF end */ /* Only predict ve */ ve = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIve)) - (xfact * (*(ckt->CKTstate2 + here->B4SOIve))); /* Then update vg, vs, vb, vd, vp base on ve */ vs = ve - model->B4SOItype * ves; vg = model->B4SOItype * vgs + vs; vd = model->B4SOItype * vds + vs; vb = model->B4SOItype * vbs + vs; vp = model->B4SOItype * vps + vs; vgge = model->B4SOItype * vges + vs; /* v3.1 */ vggm = model->B4SOItype * vgms + vs; /* v3.1 */ delTemp = (1.0 + xfact)* (*(ckt->CKTstate1 + here->B4SOIdeltemp))-(xfact * (*(ckt->CKTstate2 + here->B4SOIdeltemp))); /* v2.2.3 bug fix */ *(ckt->CKTstate0 + here->B4SOIdeltemp) = *(ckt->CKTstate1 + here->B4SOIdeltemp); /* if (selfheat) { here->B4SOIphi = 2.0 * here->B4SOIvtm * log(pParam->B4SOInpeak / here->B4SOIni); } v4.2 bugfix never used in the code */ } else { #endif /* PREDICTOR */ vg = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIgNode), *(ckt->CKTstate0 + here->B4SOIvg), 3.0, &Check); vd = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIdNodePrime), *(ckt->CKTstate0 + here->B4SOIvd), 3.0, &Check); vs = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIsNodePrime), *(ckt->CKTstate0 + here->B4SOIvs), 3.0, &Check); vp = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIpNode), *(ckt->CKTstate0 + here->B4SOIvp), 3.0, &Check); ve = B4SOIlimit(*(ckt->CKTrhsOld + 
here->B4SOIeNode), *(ckt->CKTstate0 + here->B4SOIve), 3.0, &Check); /* v3.1 added for RF */ vgge = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIgNodeExt), *(ckt->CKTstate0 + here->B4SOIvgge), 3.0, &Check); vggm = B4SOIlimit(*(ckt->CKTrhsOld + here->B4SOIgNodeMid), *(ckt->CKTstate0 + here->B4SOIvggm), 3.0, &Check); /* v3.1 added for RF end */ delTemp = *(ckt->CKTrhsOld + here->B4SOItempNode); vbs = model->B4SOItype * (*(ckt->CKTrhsOld+here->B4SOIbNode) - *(ckt->CKTrhsOld+here->B4SOIsNodePrime)); vps = model->B4SOItype * (vp - vs); vgs = model->B4SOItype * (vg - vs); ves = model->B4SOItype * (ve - vs); vds = model->B4SOItype * (vd - vs); vges = model->B4SOItype * (vgge - vs); /* v3.1 */ vgms = model->B4SOItype * (vggm - vs); /* v3.1 */ /* v4.0 */ vdbs = model->B4SOItype * (*(ckt->CKTrhsOld + here->B4SOIdbNode) - *(ckt->CKTrhsOld + here->B4SOIsNodePrime)); vsbs = model->B4SOItype * (*(ckt->CKTrhsOld + here->B4SOIsbNode) - *(ckt->CKTrhsOld + here->B4SOIsNodePrime)); vses = model->B4SOItype * (*(ckt->CKTrhsOld + here->B4SOIsNode) - *(ckt->CKTrhsOld + here->B4SOIsNodePrime)); vdes = model->B4SOItype * (*(ckt->CKTrhsOld + here->B4SOIdNode) - *(ckt->CKTrhsOld + here->B4SOIsNodePrime)); /* v4.0 end */ #ifndef PREDICTOR } #endif /* PREDICTOR */ vbd = vbs - vds; vdbd = vdbs - vds; /* v4.0 */ vgd = vgs - vds; ved = ves - vds; vgdo = *(ckt->CKTstate0 + here->B4SOIvgs) - *(ckt->CKTstate0 + here->B4SOIvds); vedo = *(ckt->CKTstate0 + here->B4SOIves) - *(ckt->CKTstate0 + here->B4SOIvds); /* v3.1 for RF */ vgedo = *(ckt->CKTstate0 + here->B4SOIvges) - *(ckt->CKTstate0 + here->B4SOIvds); vgmdo = *(ckt->CKTstate0 + here->B4SOIvgms) - *(ckt->CKTstate0 + here->B4SOIvds); vged = vges - vds; vgmd = vgms - vds; delvged = vged - vgedo; delvgmd = vgmd - vgmdo; /* v3.1 for RF end*/ delvbs = vbs - *(ckt->CKTstate0 + here->B4SOIvbs); delvbd = vbd - *(ckt->CKTstate0 + here->B4SOIvbd); delvgs = vgs - *(ckt->CKTstate0 + here->B4SOIvgs); delves = ves - *(ckt->CKTstate0 + here->B4SOIves); delvps = vps - 
*(ckt->CKTstate0 + here->B4SOIvps); deldelTemp = delTemp - *(ckt->CKTstate0 + here->B4SOIdeltemp); delvds = vds - *(ckt->CKTstate0 + here->B4SOIvds); delvgd = vgd - vgdo; delved = ved - vedo; delvges = vges - *(ckt->CKTstate0 + here->B4SOIvges); /* v3.1 */ delvgms = vgms - *(ckt->CKTstate0 + here->B4SOIvgms); /* v3.1 */ delvdbd = vdbd - *(ckt->CKTstate0 + here->B4SOIvdbd); /* v4.0 */ delvdbs = vdbs - *(ckt->CKTstate0 + here->B4SOIvdbs); /* v4.0 */ delvsbs = vsbs - *(ckt->CKTstate0 + here->B4SOIvsbs); /* v4.0 */ delvbd_jct = (!here->B4SOIrbodyMod) ? delvbd : delvdbd; /*v4.0*/ delvbs_jct = (!here->B4SOIrbodyMod) ? delvbs : delvsbs; /*v4.0*/ delvses = vses - *(ckt->CKTstate0 + here->B4SOIvses);/*v4.0*/ vdedo = *(ckt->CKTstate0 + here->B4SOIvdes) - *(ckt->CKTstate0 + here->B4SOIvds); /* v4.0 */ delvdes = vdes - *(ckt->CKTstate0 + here->B4SOIvdes); /* v4.0 */ delvded = vdes - vds - vdedo; /* v4.0 */ if (here->B4SOImode >= 0) { cdhat = here->B4SOIcd + (here->B4SOIgm-here->B4SOIgjdg) * delvgs + (here->B4SOIgds - here->B4SOIgjdd) * delvds + (here->B4SOIgmbs * delvbs - here->B4SOIgjdb * delvbs_jct ) /* v4.0 */ + (here->B4SOIgme - here->B4SOIgjde) * delves + (here->B4SOIgmT - here->B4SOIgjdT) * deldelTemp; /* v3.0 */ } else { cdhat = here->B4SOIcd + (here->B4SOIgm-here->B4SOIgjdg) * delvgd - (here->B4SOIgds - here->B4SOIgjdd) * delvds + (here->B4SOIgmbs * delvbd - here->B4SOIgjdb * delvbd_jct ) /*v4.0 */ + (here->B4SOIgme - here->B4SOIgjde) * delved + (here->B4SOIgmT - here->B4SOIgjdT) * deldelTemp; /* v3.0 */ } cbhat = here->B4SOIcb + here->B4SOIgbgs * delvgs + here->B4SOIgbbs * delvbs + here->B4SOIgbds * delvds + here->B4SOIgbes * delves + here->B4SOIgbps * delvps + here->B4SOIgbT * deldelTemp; /* v3.0 */ Isestot = here->B4SOIgstot * (*(ckt->CKTstate0 + here->B4SOIvses)); cseshat = Isestot + here->B4SOIgstot * delvses + here->B4SOIgstotd * delvds + here->B4SOIgstotg * delvgs + here->B4SOIgstotb * delvbs; Idedtot = here->B4SOIgdtot * vdedo; cdedhat = Idedtot + 
here->B4SOIgdtot * delvded + here->B4SOIgdtotd * delvds + here->B4SOIgdtotg * delvgs + here->B4SOIgdtotb * delvbs; #ifndef NOBYPASS /* following should be one big if connected by && all over * the place, but some C compilers can't handle that, so * we split it up here to let them digest it in stages */ if ((!(ckt->CKTmode & MODEINITPRED)) && (ckt->CKTbypass) && Check == 0) if ((here->B4SOIsoiMod == 2) || /* v3.2 */ (fabs(delvbs) < (ckt->CKTreltol * MAX(fabs(vbs), fabs(*(ckt->CKTstate0+here->B4SOIvbs))) + ckt->CKTvoltTol)) ) if ((here->B4SOIsoiMod == 2) || /* v3.2 */ (fabs(delvbd) < (ckt->CKTreltol * MAX(fabs(vbd), fabs(*(ckt->CKTstate0+here->B4SOIvbd))) + ckt->CKTvoltTol)) ) if ((fabs(delvgs) < (ckt->CKTreltol * MAX(fabs(vgs), fabs(*(ckt->CKTstate0+here->B4SOIvgs))) + ckt->CKTvoltTol))) if ((fabs(delves) < (ckt->CKTreltol * MAX(fabs(ves), fabs(*(ckt->CKTstate0+here->B4SOIves))) + ckt->CKTvoltTol))) if ( (here->B4SOIbodyMod == 0) || (here->B4SOIbodyMod == 2) || (fabs(delvps) < (ckt->CKTreltol * MAX(fabs(vps), fabs(*(ckt->CKTstate0+here->B4SOIvps))) + ckt->CKTvoltTol)) ) if ( (here->B4SOItempNode == 0) || (fabs(deldelTemp) < (ckt->CKTreltol * MAX(fabs(delTemp), fabs(*(ckt->CKTstate0+here->B4SOIdeltemp))) + ckt->CKTvoltTol*1e4))) /* v3.1 added for RF */ if ((here->B4SOIrgateMod == 0) || (here->B4SOIrgateMod == 1) || (fabs(delvges) < (ckt->CKTreltol * MAX(fabs(vges), fabs(*(ckt->CKTstate0 + here->B4SOIvges))) + ckt->CKTvoltTol))) if ((here->B4SOIrgateMod != 3) || (fabs(delvgms) < (ckt->CKTreltol * MAX(fabs(vgms), fabs(*(ckt->CKTstate0 + here->B4SOIvgms))) + ckt->CKTvoltTol))) /* v3.1 added for RF end */ /* v4.0 */ if ((!here->B4SOIrbodyMod) || (fabs(delvdbs) < (ckt->CKTreltol * MAX(fabs(vdbs), fabs(*(ckt->CKTstate0 + here->B4SOIvdbs))) + ckt->CKTvoltTol))) if ((!here->B4SOIrbodyMod) || (fabs(delvdbd) < (ckt->CKTreltol * MAX(fabs(vdbd), fabs(*(ckt->CKTstate0 + here->B4SOIvdbd))) + ckt->CKTvoltTol))) if ((!here->B4SOIrbodyMod) || (fabs(delvsbs) < (ckt->CKTreltol * 
MAX(fabs(vsbs), fabs(*(ckt->CKTstate0 + here->B4SOIvsbs))) + ckt->CKTvoltTol))) if ((!model->B4SOIrdsMod) || (fabs(delvses) < (ckt->CKTreltol * MAX(fabs(vses), fabs(*(ckt->CKTstate0 + here->B4SOIvses))) + ckt->CKTvoltTol))) if ((!model->B4SOIrdsMod) || (fabs(delvdes) < (ckt->CKTreltol * MAX(fabs(vdes), fabs(*(ckt->CKTstate0 + here->B4SOIvdes))) + ckt->CKTvoltTol))) if ((!model->B4SOIrdsMod) || ((fabs(cseshat - Isestot) < ckt->CKTreltol * MAX(fabs(cseshat), fabs(Isestot)) + ckt->CKTabstol))) if ((!model->B4SOIrdsMod) || ((fabs(cdedhat - Idedtot) < ckt->CKTreltol * MAX(fabs(cdedhat), fabs(Idedtot)) + ckt->CKTabstol))) /* v4.0 end */ if ((fabs(delvds) < (ckt->CKTreltol * MAX(fabs(vds), fabs(*(ckt->CKTstate0+here->B4SOIvds))) + ckt->CKTvoltTol))) if ((fabs(cdhat - here->B4SOIcd) < ckt->CKTreltol * MAX(fabs(cdhat),fabs(here->B4SOIcd)) + ckt->CKTabstol)) if ((here->B4SOIsoiMod == 2) || /* v3.2 */ (fabs(cbhat - here->B4SOIcb) < ckt->CKTreltol * MAX(fabs(cbhat),fabs(here->B4SOIcb)) + ckt->CKTabstol) ) { /* bypass code */ vbs = *(ckt->CKTstate0 + here->B4SOIvbs); vbd = *(ckt->CKTstate0 + here->B4SOIvbd); vgs = *(ckt->CKTstate0 + here->B4SOIvgs); ves = *(ckt->CKTstate0 + here->B4SOIves); vps = *(ckt->CKTstate0 + here->B4SOIvps); vds = *(ckt->CKTstate0 + here->B4SOIvds); /* v3.1 added for RF */ vges = *(ckt->CKTstate0 + here->B4SOIvges); vgms = *(ckt->CKTstate0 + here->B4SOIvgms); vged = vges - vds; vgmd = vgms - vds; vgme = vgms - ves; /* v3.1 added for RF end */ vgmb = vgms - vbs; /* v3.2 bug fix */ /* v4.0 */ vdbs = *(ckt->CKTstate0 + here->B4SOIvdbs); vdbd = *(ckt->CKTstate0 + here->B4SOIvdbd); vsbs = *(ckt->CKTstate0 + here->B4SOIvsbs); vbs_jct = (!here->B4SOIrbodyMod) ? vbs : vsbs; vbd_jct = (!here->B4SOIrbodyMod) ? 
vbd : vdbd; vses = *(ckt->CKTstate0 + here->B4SOIvses); vdes = *(ckt->CKTstate0 + here->B4SOIvdes); /* v4.0 end */ delTemp = *(ckt->CKTstate0 + here->B4SOIdeltemp); /* calculate Vds for temperature conductance calculation in bypass (used later when filling Temp node matrix) */ Vds = here->B4SOImode > 0 ? vds : -vds; vgd = vgs - vds; vgb = vgs - vbs; veb = ves - vbs; if ((ckt->CKTmode & (MODETRAN | MODEAC)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) { ByPass = 1; goto line755; } else { goto line850; } } #endif /*NOBYPASS*/ von = here->B4SOIvon; if (*(ckt->CKTstate0 + here->B4SOIvds) >= 0.0) { T0 = *(ckt->CKTstate0 + here->B4SOIvbs); /* v3.1 added for RF */ if (here->B4SOIrgateMod == 3) { vged = vges - vds; vgmd = vgms - vds; } else if ((here->B4SOIrgateMod == 1) || (here->B4SOIrgateMod == 2)) { vged = vges - vds; } /* v3.1 added for RF end*/ } else { T0 = *(ckt->CKTstate0 + here->B4SOIvbd); /* added for RF */ if (here->B4SOIrgateMod == 3) { vges = vged + vds; vgms = vgmd + vds; } if ((here->B4SOIrgateMod == 1) || (here->B4SOIrgateMod == 2)) { vges = vged + vds; } /* added for RF end*/ } if (vds >= 0.0) { vbs = B4SOIlimit(vbs, T0, 0.2, &Check); vbd = vbs - vds; vb = model->B4SOItype * vbs + vs; if (here->B4SOIrbodyMod) /* v4.0 */ { vdbs = B4SOIlimit(vdbs, *(ckt->CKTstate0 + here->B4SOIvdbs), 0.2, &Check); vdbd = vdbs - vds; vsbs = B4SOIlimit(vsbs, *(ckt->CKTstate0 + here->B4SOIvsbs), 0.2, &Check); } } else { vbd = B4SOIlimit(vbd, T0, 0.2, &Check); vbs = vbd + vds; vb = model->B4SOItype * vbs + vd; /* v4.0 */ if (here->B4SOIrbodyMod) { vdbd = B4SOIlimit(vdbd, *(ckt->CKTstate0 + here->B4SOIvdbd), 0.2, &Check); vdbs = vdbd + vds; vsbdo = *(ckt->CKTstate0 + here->B4SOIvsbs) - *(ckt->CKTstate0 + here->B4SOIvds); vsbd = vsbs - vds; vsbd = B4SOIlimit(vsbd, vsbdo, 0.2, &Check); vsbs = vsbd + vds; } /* v4.0 end */ } delTemp =B4SOIlimit(delTemp, *(ckt->CKTstate0 + here->B4SOIdeltemp),5.0,&Check); } if(model->B4SOImtrlMod) { epsrox = 3.9; toxe = 
model->B4SOIeot; epssub = EPS0 * model->B4SOIepsrsub; /* bugfix following constants should be replaced with model params -Tanvir */ eggbcp2 = 1.12; eggdep = 1.12; agb1 = 3.7622e-7; bgb1 = -3.1051e10; agb2 = 4.9758e-7; bgb2 = -2.357e10; agbc2n = 3.42537e-7; agbc2p = 4.97232e-7; bgbc2n = 1.16645e12; bgbc2p = 7.45669e11; } else { epsrox = model->B4SOIepsrox; toxe = model->B4SOItox; epssub = EPSSI; /* bugfix v4.3.1 following constants are valid for mtrlMod=0 -Tanvir */ eggbcp2 = 1.12; eggdep = 1.12; agb1 = 3.7622e-7; bgb1 = -3.1051e10; agb2 = 4.9758e-7; bgb2 = -2.357e10; agbc2n = 3.42537e-7; agbc2p = 4.97232e-7; bgbc2n = 1.16645e12; bgbc2p = 7.45669e11; } /* Calculate temperature dependent values for self-heating effect */ Temp = delTemp + ckt->CKTtemp; dTempRatio_dT = 1 / model->B4SOItnom; TempRatio = Temp * dTempRatio_dT; here->B4SOITempSH = Temp; /*v4.2 added for portability of SH Temp */ dEg_dT = 0.0; /* new line Wagner */ Vtm00= 0.026; /* v4.3.1 Vtm00 replaces hardcoded 0.026 -Tanvir */ if (selfheat) { if(model->B4SOImtrlMod==0) { Vtm = KboQ * Temp; T0 = 1108.0 + Temp; T5 = Temp * Temp; Eg = 1.16 - 7.02e-4 * T5 / T0; dEg_dT = T1 = ((7.02e-4 * T5) - T0 * (14.04e-4 * Temp)) / T0 / T0; /* enhanced line Wagner */ /* T1 = dEg / dT */ T2 = 1.9230584e-4; /* T2 = 1 / 300.15^(3/2) */ T5 = sqrt(Temp); T3 = 1.45e10 * Temp * T5 * T2; T4 = exp(21.5565981 - Eg / (2.0 * Vtm)); ni = T3 * T4; dni_dT = 2.175e10 * T2 * T5 * T4 + T3 * T4 * (-Vtm * T1 + Eg * KboQ) / (2.0 * Vtm * Vtm); T0 = log(1.0e20 * pParam->B4SOInpeak / (ni * ni)); vbi = Vtm * T0; dvbi_dT = KboQ * T0 + Vtm * (-2.0 * dni_dT / ni); } else { Tnom = model->B4SOItnom; Vtm = KboQ * Temp; Vtm0= KboQ * Tnom; Eg0 = model->B4SOIeg0; T0 = model->B4SOItbgbsub + Temp; T5 = Temp * Temp; Eg = model->B4SOIbg0sub - model->B4SOItbgasub * Temp * Temp / (Temp + model->B4SOItbgbsub); dEg_dT = T1 = ((model->B4SOItbgasub * T5) - T0 * (2.0*model->B4SOItbgasub * Temp)) / T0 / T0; /* enhanced line Wagner */ /* T1 = dEg / dT */ T2 = 
1/sqrt(Tnom*Tnom*Tnom); T5 = sqrt(Temp); T3 = model->B4SOIni0sub * Temp * T5 * T2; T4 = exp(Eg0/(2.0*Vtm0) - Eg / (2.0 * Vtm)); ni = T3 * T4; dni_dT=1.5*model->B4SOIni0sub*T5*T2*T4+ T3*T4*(-Vtm * T1 + Eg * KboQ) / (2.0 * Vtm * Vtm); T0 = log(1.0e20 * pParam->B4SOInpeak / (ni * ni)); vbi = Vtm * T0; dvbi_dT = KboQ * T0 + Vtm * (-2.0 * dni_dT / ni); } if (pParam->B4SOInsub > 0) { T0 = log(pParam->B4SOInpeak / pParam->B4SOInsub); vfbb = -model->B4SOItype * Vtm * T0; dvfbb_dT = -model->B4SOItype * KboQ * T0; } else { T0 = log(-pParam->B4SOInpeak * pParam->B4SOInsub / ni / ni); vfbb = -model->B4SOItype * Vtm * T0; dvfbb_dT = -model->B4SOItype * (KboQ * T0 - Vtm * 2.0 * dni_dT / ni); } /* V4.0 changed phi */ phi = 2.0 * Vtm * log(pParam->B4SOInpeak / ni); /* phi = here->B4SOIphi; */ sqrtPhi = sqrt(phi); Xdep0 = sqrt(2.0 * epssub / (Charge_q * pParam->B4SOInpeak * 1.0e6)) * sqrtPhi; /* v4.1 SH bug fix */ /* dphi_dT = phi / Vtm * KboQ; v4.2 Temp Derivative bug fix */ dphi_dT = phi / Vtm * KboQ - 2.0 * Vtm * dni_dT / ni; dsqrtPhi_dT = 0.5 / sqrtPhi * dphi_dT; dXdep0_dT = Xdep0 / sqrtPhi * dsqrtPhi_dT; /* cdep0 = sqrt(Charge_q * EPSSI * pParam->B4SOInpeak * 1.0e6 / 2.0) / sqrtPhi; */ /* Bug fix #2 Jun 09 Body type is generalized for mtrlMod 1*/ cdep0 = sqrt(Charge_q * epssub /* Fix */ * pParam->B4SOInpeak * 1.0e6 / 2.0) / sqrtPhi; /* fix LHS name - Wagner */ /*dcep0_dT = cdep0 * sqrtPhi * (-1.0) / phi * dsqrtPhi_dT; */ dcdep0_dT = cdep0 * sqrtPhi * (-1.0) / phi * dsqrtPhi_dT; /* T1 = sqrt(EPSSI / (model->B4SOIepsrox * EPSOX / 3.9) Bug fix #3 Jun 09 Body type is generalized for mtrlMod 1*/ /* * model->B4SOItox * Xdep0); */ T1 = sqrt(epssub / (epsrox * EPS0) /* Fix */ * toxe * Xdep0); dT1_dT = 0.5 * T1 / Xdep0 * dXdep0_dT; T0 = exp(-0.5 * pParam->B4SOIdsub * pParam->B4SOIleff / T1); dT0_dT = T0 * 0.5 * pParam->B4SOIdsub * pParam->B4SOIleff / T1 / T1 * dT1_dT; theta0vb0 = (T0 + 2.0 * T0 * T0); dtheta0vb0_dT = (1.0 + 4.0 * T0) * dT0_dT; T0 = exp(-0.5 * pParam->B4SOIdrout * 
pParam->B4SOIleff / T1); dT0_dT = T0 * 0.5 * pParam->B4SOIdrout * pParam->B4SOIleff / T1 / T1 * dT1_dT; T2 = (T0 + 2.0 * T0 * T0); thetaRout = pParam->B4SOIpdibl1 * T2 + pParam->B4SOIpdibl2; dthetaRout_dT = pParam->B4SOIpdibl1 * (1.0 + 4.0 * T0) * dT0_dT; /* Save the values below for phi calculation in B4SOIaccept() */ here->B4SOIvtm = Vtm; /* here->B4SOIni = ni; v4.2 bugfix never used in the code */ T3 = TempRatio - 1.0; T8 = 1/ model->B4SOItnom; T4 = Eg300 / Vtm * T3; dT4_dT = Eg300 / Vtm / Vtm * (Vtm * T8 - T3 * KboQ); T7 = pParam->B4SOIxbjt * T4 / pParam->B4SOIndiode; dT7_dT = pParam->B4SOIxbjt * dT4_dT / pParam->B4SOIndiode; DEXP(T7, T0, dT0_dT7); dT0_dT = dT0_dT7 * dT7_dT; if (pParam->B4SOIxbjt == pParam->B4SOIxdif) { T1 = T0; dT1_dT = dT0_dT; } else { T7 = pParam->B4SOIxdif * T4 / pParam->B4SOIndiode; dT7_dT = pParam->B4SOIxdif * dT4_dT / pParam->B4SOIndiode; DEXP(T7, T1, dT1_dT7); dT1_dT = dT1_dT7 * dT7_dT; } T7 = pParam->B4SOIxrec * T4 / pParam->B4SOInrecf0; dT7_dT = pParam->B4SOIxrec * dT4_dT / pParam->B4SOInrecf0; DEXP(T7, T2, dT2_dT7); dT2_dT = dT2_dT7 * dT7_dT; /* high level injection */ Ahlis = pParam->B4SOIahli * T0; dAhlis_dT = pParam->B4SOIahli * dT0_dT; jbjts = pParam->B4SOIisbjt * T0; jdifs = pParam->B4SOIisdif * T1; jrecs = pParam->B4SOIisrec * T2; djbjts_dT = pParam->B4SOIisbjt * dT0_dT; djdifs_dT = pParam->B4SOIisdif * dT1_dT; djrecs_dT = pParam->B4SOIisrec * dT2_dT; T7 = pParam->B4SOIxtun * T3; dT7_dT = pParam->B4SOIxtun * T8; DEXP(T7, T0, dT0_dT7); dT0_dT = dT0_dT7 * dT7_dT; jtuns = pParam->B4SOIistun * T0; djtuns_dT = pParam->B4SOIistun * dT0_dT; /* drain side */ T7 = pParam->B4SOIxbjt * T4 / pParam->B4SOIndioded; dT7_dT = pParam->B4SOIxbjt * dT4_dT / pParam->B4SOIndioded; DEXP(T7, T0, dT0_dT7); dT0_dT = dT0_dT7 * dT7_dT; if (pParam->B4SOIxbjt == pParam->B4SOIxdifd) { T1 = T0; dT1_dT = dT0_dT; } else { T7 = pParam->B4SOIxdifd * T4 / pParam->B4SOIndioded; dT7_dT = pParam->B4SOIxdifd * dT4_dT / pParam->B4SOIndioded; DEXP(T7, T1, dT1_dT7); 
dT1_dT = dT1_dT7 * dT7_dT; } T7 = pParam->B4SOIxrecd * T4 / pParam->B4SOInrecf0d; dT7_dT = pParam->B4SOIxrecd * dT4_dT / pParam->B4SOInrecf0d; DEXP(T7, T2, dT2_dT7); dT2_dT = dT2_dT7 * dT7_dT; /* high level injection */ Ahlid = pParam->B4SOIahlid * T0; dAhlid_dT = pParam->B4SOIahlid * dT0_dT; jbjtd = pParam->B4SOIidbjt * T0; jdifd = pParam->B4SOIiddif * T1; jrecd = pParam->B4SOIidrec * T2; djbjtd_dT = pParam->B4SOIidbjt * dT0_dT; djdifd_dT = pParam->B4SOIiddif * dT1_dT; djrecd_dT = pParam->B4SOIidrec * dT2_dT; T7 = pParam->B4SOIxtund * T3; dT7_dT = pParam->B4SOIxtund * T8; DEXP(T7, T0, dT0_dT7); dT0_dT = dT0_dT7 * dT7_dT; jtund = pParam->B4SOIidtun * T0; djtund_dT = pParam->B4SOIidtun * dT0_dT; u0temp = pParam->B4SOIu0 * pow(TempRatio, pParam->B4SOIute); du0temp_dT = pParam->B4SOIu0 * pParam->B4SOIute * pow(TempRatio, pParam->B4SOIute - 1.0) * T8; ku0temp = pParam->B4SOIku0 * (1.0 /* + model->B4SOItku0 * TempRatio) + DELTA; v4.2 bugfix */ + model->B4SOItku0 * T3) + DELTA; dku0temp_dT = pParam->B4SOIku0 * model->B4SOItku0 * T8; T2 = ku0temp * ku0temp; T7 = model->B4SOIku0 * pParam->B4SOIinv_od_ref; rho_ref = T7 / ku0temp; drho_ref_dT = -T7 / T2 * dku0temp_dT; T4 = model->B4SOIku0 * here->B4SOIInv_ODeff; rho = T4 / ku0temp; drho_dT = -T4 / T2 * dku0temp_dT; T2 = (1.0 + rho); T7 = (1.0 + rho_ref); T0 = T2 / T7; dT0_dT = (drho_dT * T7 - drho_ref_dT * T2 ) / T7 / T7; du0temp_dT = T0 * du0temp_dT + u0temp * dT0_dT; u0temp *= T0; vsattemp = pParam->B4SOIvsat - pParam->B4SOIat * T3; dvsattemp_dT = -pParam->B4SOIat * T8; T2 = (1.0 + here->B4SOIkvsat * rho); T7 = (1.0 + here->B4SOIkvsat * rho_ref); T0 = T2 / T7; dT0_dT = (here->B4SOIkvsat * drho_dT * T7 - here->B4SOIkvsat * drho_ref_dT * T2) / T7 / T7; dvsattemp_dT = dvsattemp_dT * T0 + vsattemp * dT0_dT; vsattemp *= T0; here->B4SOIvsattemp = vsattemp; if (!model->B4SOIrdsMod) { rds0 = (pParam->B4SOIrdsw + pParam->B4SOIprt * T3) / pParam->B4SOIrds0denom; drds0_dT = pParam->B4SOIprt / pParam->B4SOIrds0denom * T8; } else { /* 
v4.0 */ PowWeffWr = pParam->B4SOIrds0denom * here->B4SOInf; T10 = pParam->B4SOIprt * T3; /* External Rd(V) */ T1 = pParam->B4SOIrdw + T10; T2 = model->B4SOIrdwmin + T10; rd0 = T1 / PowWeffWr; rdwmin = T2 / PowWeffWr; drd0_dT = pParam->B4SOIprt / PowWeffWr * T8; drdwmin_dT = drd0_dT; /* External Rs(V) */ T7 = pParam->B4SOIrsw + T10; T4 = model->B4SOIrswmin + T10; rs0 = T7 / PowWeffWr; rswmin = T4 / PowWeffWr; drs0_dT = drswmin_dT = drd0_dT; } ua = pParam->B4SOIuatemp + pParam->B4SOIua1 * T3; ub = pParam->B4SOIubtemp + pParam->B4SOIub1 * T3; uc = pParam->B4SOIuctemp + pParam->B4SOIuc1 * T3; dua_dT = pParam->B4SOIua1 * T8; dub_dT = pParam->B4SOIub1 * T8; duc_dT = pParam->B4SOIuc1 * T8; } else { vbi = pParam->B4SOIvbi; vfbb = pParam->B4SOIvfbb; phi = pParam->B4SOIphi; sqrtPhi = pParam->B4SOIsqrtPhi; Xdep0 = pParam->B4SOIXdep0; /* Eg = model->B4SOIeg0; */ /* Bug fix #11 Jun 09 'Eg is evaluated at Temp, not Tnom' */ Eg = model->B4SOIeg; /* 'model->B4SOIeg' computed in b4soitemp.c */ /* v4.1 */ /* Since selfheat=0, using Eg from b4soitemp.c*/ cdep0 = pParam->B4SOIcdep0; theta0vb0 = pParam->B4SOItheta0vb0; thetaRout = pParam->B4SOIthetaRout; jbjts = pParam->B4SOIjbjts; /* v4.0 */ jbjtd = pParam->B4SOIjbjtd; jdifs = pParam->B4SOIjdifs; jdifd = pParam->B4SOIjdifd; jrecs = pParam->B4SOIjrecs; jrecd = pParam->B4SOIjrecd; jtuns = pParam->B4SOIjtuns; jtund = pParam->B4SOIjtund; /* v2.2.2 bug fix */ Ahlis = pParam->B4SOIahli0s; Ahlid = pParam->B4SOIahli0d; u0temp = here->B4SOIu0temp; vsattemp = here->B4SOIvsattemp; ua = pParam->B4SOIua; ub = pParam->B4SOIub; uc = pParam->B4SOIuc; dni_dT = dvbi_dT = dvfbb_dT = 0.0; djbjts_dT = djdifs_dT = djrecs_dT = djtuns_dT = 0.0; djbjtd_dT = djdifd_dT = djrecd_dT = djtund_dT = 0.0; du0temp_dT = dvsattemp_dT = 0.0; dua_dT = dub_dT = duc_dT = 0.0; /* v4.1 */ dphi_dT = dsqrtPhi_dT = dXdep0_dT = 0.0; dcdep0_dT = dtheta0vb0_dT = dthetaRout_dT = 0.0; if (!model->B4SOIrdsMod) { rds0 = pParam->B4SOIrds0; drds0_dT = 0.0; } else { rd0 = 
pParam->B4SOIrd0; rs0 = pParam->B4SOIrs0; rdwmin = pParam->B4SOIrdwmin; rswmin = pParam->B4SOIrswmin; drd0_dT = drs0_dT = drdwmin_dT = drswmin_dT = 0.0; } dAhlis_dT = dAhlid_dT = 0; } /* TempRatio used for Vth and mobility */ if (selfheat) { TempRatioMinus1 = Temp / model->B4SOItnom - 1.0; } else { TempRatioMinus1 = ckt->CKTtemp / model->B4SOItnom - 1.0; } /* determine DC current and derivatives */ vbd = vbs - vds; vgd = vgs - vds; vgb = vgs - vbs; ved = ves - vds; veb = ves - vbs; vge = vgs - ves; vpd = vps - vds; vgp = vgs - vps; /* v3.1 added for RF */ vged = vges - vds; vgmd = vgms - vds; vgme = vgms - ves; /* v3.1 added for RF end */ vgmb = vgms - vbs; /* v3.2 bug fix */ agidl = pParam->B4SOIagidl; bgidl = pParam->B4SOIbgidl; cgidl = pParam->B4SOIcgidl; egidl = pParam->B4SOIegidl; rgidl = pParam->B4SOIrgidl; kgidl = pParam->B4SOIkgidl; fgidl = pParam->B4SOIfgidl; agisl = pParam->B4SOIagisl; bgisl = pParam->B4SOIbgisl; cgisl = pParam->B4SOIcgisl; egisl = pParam->B4SOIegisl; rgisl = pParam->B4SOIrgisl; kgisl = pParam->B4SOIkgisl; fgisl = pParam->B4SOIfgisl; if (vds >= 0.0) { /* normal mode */ here->B4SOImode = 1; Vds = vds; Vgs = vgs; Vbs = vbs; Vbd = vbd; Ves = ves; Vps = vps; Vsbs = vsbs; /* v4.0 */ Vdbs = vdbs; /* v4.0 */ Vdbd = Vdbs - Vds; /* v4.0 */ Vgd = vgd; /* v4.1 */ wdios = pParam->B4SOIwdios; wdiod = pParam->B4SOIwdiod; ndiode = pParam->B4SOIndiode; /* v4.2 bugfix*/ ndioded = pParam->B4SOIndioded; /* v4.2 bugfix*/ nrecf0s = pParam->B4SOInrecf0; /* bugfix_snps start for junction DC part*/ nrecf0d = pParam->B4SOInrecf0d; nrecr0s = pParam->B4SOInrecr0; nrecr0d = pParam->B4SOInrecr0d; vrec0s = pParam->B4SOIvrec0; vrec0d = pParam->B4SOIvrec0d; ntuns = pParam->B4SOIntun; ntund = pParam->B4SOIntund; vtun0s = pParam->B4SOIvtun0; vtun0d = pParam->B4SOIvtun0d; /* bugfix_snps end for junction DC part*/ } else { /* inverse mode */ here->B4SOImode = -1; Vds = -vds; Vgs = vgd; Vbs = vbd; Vbd = vbs; Ves = ved; Vps = vpd; Vsbs = vdbd; /* v4.0 */ Vdbd = vsbs; /* v4.0 
*/ Vdbs = Vdbd + Vds; /* v4.0 */ Vgd = vgs; /* v4.1 */ wdios = pParam->B4SOIwdiod; wdiod = pParam->B4SOIwdios; ndiode = pParam->B4SOIndioded; /* v4.2 bugfix*/ ndioded = pParam->B4SOIndiode; /* v4.2 bugfix*/ nrecf0s = pParam->B4SOInrecf0d; /* bugfix_snps start for junction DC part*/ nrecf0d = pParam->B4SOInrecf0; nrecr0s = pParam->B4SOInrecr0d; nrecr0d = pParam->B4SOInrecr0; vrec0s = pParam->B4SOIvrec0d; vrec0d = pParam->B4SOIvrec0; ntuns = pParam->B4SOIntund; ntund = pParam->B4SOIntun; vtun0s = pParam->B4SOIvtun0d; vtun0d = pParam->B4SOIvtun0; /* bugfix_snps end for junction DC part*/ } if( vds < 0.0) {/*Diode current*/ T0 = jbjts; T1 = djbjts_dT; jbjts = jbjtd; djbjts_dT = djbjtd_dT; jbjtd = T0; djbjtd_dT = T1; T0 = jdifs; T1 = djdifs_dT; jdifs = jdifd; djdifs_dT = djdifd_dT; jdifd = T0; djdifd_dT = T1; T0 = jrecs; T1 = djrecs_dT; jrecs = jrecd; djrecs_dT = djrecd_dT; jrecd = T0; djrecd_dT = T1; T0 = jtuns; T1 = djtuns_dT; jtuns = jtund; djtuns_dT = djtund_dT; jtund = T0; djtund_dT = T1; /*GISL/GIDL*/ T0 = agidl; agidl = agisl; agisl = T0; T0 = bgidl; bgidl = bgisl; bgisl = T0; T0 = cgidl; cgidl = cgisl; cgisl = T0; T0 = egidl; egidl = egisl; egisl = T0; T0 = rgidl; rgidl = rgisl; rgisl = T0; T0 = kgidl; kgidl = kgisl; kgisl = T0; T0 = fgidl; fgidl = fgisl; fgisl = T0; T0 = Ahlis; /* bugfix_snps */ Ahlis = Ahlid; /* bugfix_snps */ Ahlid = T0; /* bugfix_snps */ T0 = dAhlis_dT; /* bugfix_snps */ dAhlis_dT = dAhlid_dT; /* bugfix_snps */ dAhlid_dT = T0; /* bugfix_snps */ } vbs_jct = (!here->B4SOIrbodyMod) ? Vbs : Vsbs; /* v4.0 */ vbd_jct = (!here->B4SOIrbodyMod) ? Vbd : Vdbd; /* v4.0 */ Vesfb = Ves - vfbb; Cbox = model->B4SOIcbox; K1 = pParam->B4SOIk1eff; ChargeComputationNeeded = ((ckt->CKTmode & (MODEAC | MODETRAN | MODEINITSMSIG)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) ? 
1 : 0; if (here->B4SOIdebugMod <0) ChargeComputationNeeded = 1; #ifdef B4SOI_DEBUG_OUT ChargeComputationNeeded = 1; here->B4SOIdebug1 = 0.0; here->B4SOIdebug2 = 0.0; here->B4SOIdebug3 = 0.0; #endif /* Poly Gate Si Depletion Effect */ T0 = here->B4SOIvfb + phi; if (model->B4SOImtrlMod==0) epsgate = epssub; else epsgate = model->B4SOIepsrgate * EPS0; if ((pParam->B4SOIngate > 1.e18) && (pParam->B4SOIngate < 1.e25) && (Vgs > T0)&& (epsgate!=0)) /* added to avoid the problem caused by ngate */ { T1 = 1.0e6 * Charge_q * epsgate * pParam->B4SOIngate / (model->B4SOIcox * model->B4SOIcox); T4 = sqrt(1.0 + 2.0 * (Vgs - T0) / T1); T2 = T1 * (T4 - 1.0); T3 = 0.5 * T2 * T2 / T1; /* T3 = Vpoly */ /* T7 = 1.12 - T3 - 0.05; */ T7 = eggdep - T3 - 0.05; /* bugfix: v4.3.1 -Tanvir */ T6 = sqrt(T7 * T7 + 0.224); /* T5 = 1.12 - 0.5 * (T7 + T6); */ T5 = eggdep - 0.5 * (T7 + T6); /* bugfix: v4.3.1 -Tanvir */ Vgs_eff = Vgs - T5; dVgs_eff_dVg = 1.0 - (0.5 - 0.5 / T4) * (1.0 + T7 / T6); /* 7 new lines Wagner */ if (selfheat) { dTL2_dT = - dphi_dT / T4; dTL3_dT = T2 * dTL2_dT / T1; dTL6_dT = - T7 * dTL3_dT / T6; dVgs_eff_dT = 0.5 * (dTL6_dT - dTL3_dT); } else dVgs_eff_dT = 0.0; } else { Vgs_eff = Vgs; dVgs_eff_dVg = 1.0; dVgs_eff_dT = 0.0; /* new line Wagner */ } if ((pParam->B4SOIngate > 1.e18) && (pParam->B4SOIngate < 1.e25)/* Bug fix # 25/26 Vgd_eff defined */ && (Vgd > T0)&& (epsgate!=0)) /* added to avoid the problem caused by ngate */ { T1 = 1.0e6 * Charge_q * epsgate * pParam->B4SOIngate / (model->B4SOIcox * model->B4SOIcox); T4 = sqrt(1.0 + 2.0 * (Vgd - T0) / T1); T2 = T1 * (T4 - 1.0); T3 = 0.5 * T2 * T2 / T1; /* T3 = Vpoly */ /* T7 = 1.12 - T3 - 0.05; */ T7 = eggdep - T3 - 0.05; /* bugfix: v4.3.1 -Tanvir */ T6 = sqrt(T7 * T7 + 0.224); /* T5 = 1.12 - 0.5 * (T7 + T6); */ T5 = eggdep - 0.5 * (T7 + T6); /* bugfix: v4.3.1 -Tanvir */ Vgd_eff = Vgd - T5; dVgd_eff_dVg = 1.0 - (0.5 - 0.5 / T4) * (1.0 + T7 / T6); /* 7 new lines Wagner */ if (selfheat) { dTL2_dT = - dphi_dT / T4; dTL3_dT = T2 
* dTL2_dT / T1; dTL6_dT = - T7 * dTL3_dT / T6; dVgd_eff_dT = 0.5 * (dTL6_dT - dTL3_dT); } else dVgd_eff_dT = 0.0; } else { Vgd_eff = Vgd; dVgd_eff_dVg = 1.0; dVgd_eff_dT = 0.0; /* new line Wagner */ } /* if( here->B4SOImode != 1){ T1=Vgs_eff; Vgs_eff=Vgd_eff; Vgd_eff=T1; T2=dVgs_eff_dVg; dVgs_eff_dVg=dVgd_eff_dVg; dVgd_eff_dVg=T2; } */ /* v4.1 for improved BT charge model, no poly depletion */ Vgs_eff2 = Vgs; dVgs_eff2_dVg = 1.0; /* end v4.1 for improved BT charge model */ Leff = pParam->B4SOIleff; if (selfheat) { Vtm = KboQ * Temp; dVtm_dT = KboQ; } else { Vtm = model->B4SOIvtm; dVtm_dT = 0.0; } V0 = vbi - phi; /* begin of v3.0 block addition */ /* B/S built-in potential lowering calculation */ if (here->B4SOIsoiMod == 0) /* BSIMPD */ /* v3.2 */ { Vbsmos = Vbs; dVbsmos_dVg = 0.0; dVbsmos_dVd = 0.0; dVbsmos_dVb = 1.0; dVbsmos_dVe = 0.0; /* LFW_FD 5 new lines */ dVbs_dVg = 0.0; dVbs_dVd = 0.0; dVbs_dVb = 1.0; dVbs_dVe = 0.0; dVbs_dT = 0.0; dVbsmos_dT = 0.0; Vbp = Vbs - Vps; dVbp_dVb = 1; } else /* soiMod = 1 or 2: adding FD module on top of BSIMPD */ { /* prepare Vbs0 & Vbs0mos for VthFD calculation */ if (model->B4SOIfdMod == 0) /* v4.0 */ { T0 = -model->B4SOIdvbd1 * pParam->B4SOIleff / pParam->B4SOIlitl; T1 = model->B4SOIdvbd0 * (exp(0.5*T0) + 2*exp(T0)); T2 = T1 * (vbi - phi); T3 = 0.5 * pParam->B4SOIqsi / model->B4SOIcsi; /* v3.2 */ Vbs0t = phi - T3 + model->B4SOIvbsa + T2; dVbs0t_dVd = 0.0; dVbs0_dVd = 0.0; if (selfheat) /* dVbs0t_dT = T1 * dvbi_dT; */ dVbs0t_dT = (1.0 - T1) * dphi_dT + T1 * dvbi_dT; /* LFW_FD new line */ else dVbs0t_dT = 0.0; T0 = 1 + model->B4SOIcsi / Cbox; T3 = -model->B4SOIdk2b * pParam->B4SOIleff / pParam->B4SOIlitl; T5 = model->B4SOIk2b * (exp(0.5*T3) + 2*exp(T3)); T1 = (model->B4SOIk1b - T5) / T0; T2 = T1 * Vesfb; T4 = 1.0/(1 + Cbox / model->B4SOIcsi); Vbs0 = T4 * Vbs0t + T2; dVbs0_dVe = T1; dVbs0_dVd = 0.0; /* flexilint */ if (selfheat) dVbs0_dT = T4 * dVbs0t_dT - T1 * dvfbb_dT; else dVbs0_dT = 0.0; } else { T0 = 1.0/(model->B4SOIcsi + 
Cbox + model->B4SOIcdsbs); T1 = -model->B4SOIdvbd1 * pParam->B4SOIleff / pParam->B4SOIlitl; T2 = model->B4SOIdvbd0 * (exp(0.5*T1) + 2*exp(T1)); T3 = T2 * (Vds + model->B4SOIvsce); T4 = 0.5 * pParam->B4SOIqsi / model->B4SOIcsi; T5 = model->B4SOIcsi * T0 * (phi - T4 + model->B4SOIvbsa); T6 = model->B4SOIcdsbs * T0 * T3; Vbs0t = T5 + T6; dVbs0t_dVd = model->B4SOIcdsbs * T0 * T2; if (selfheat) /* dVbs0t_dT = 0.0; LFW_FD changed line */ dVbs0t_dT = model->B4SOIcsi * T0 * dphi_dT; else dVbs0t_dT = 0.0; T7 = Cbox * T0 * Vesfb; Vbs0 = Vbs0t + T7; dVbs0_dVe = Cbox * T0; dVbs0_dVd = dVbs0t_dVd; if (selfheat) dVbs0_dT = dVbs0t_dT - Cbox * T0 * dvfbb_dT; else dVbs0_dT = 0.0; } /* zero field body potential cal. */ T1 = Vbs0t - Vbs0 - 0.005; T2 = sqrt(T1 * T1 + (2.5e-5)); T3 = 0.5 * (T1 + T2); T4 = T3 * model->B4SOIcsi / pParam->B4SOIqsi; /* v3.2 */ Vbs0mos = Vbs0 - 0.5 * T3 * T4; T5 = 0.5 * T4 * (1 + T1 / T2); dVbs0mos_dVe = dVbs0_dVe * (1 + T5); /* dVbs0mos_dVd = dVbs0_dVd + T5 * (dVbs0t_dVd - dVbs0_dVd); LFW_FD */ dVbs0mos_dVd = dVbs0_dVd * (1 + T5) - T5 * dVbs0t_dVd; if (selfheat) dVbs0mos_dT = dVbs0_dT * (1 + T5) - T5 * dVbs0t_dT; else dVbs0mos_dT = 0.0; /* set the upperbound of Vbs0mos to be phi for square root calc. 
*/ T1 = phi - 0.02; T2 = T1 - Vbs0mos - 0.005; T3 = sqrt(T2 * T2 + 4.0 * 0.005); Vbs0mos = T1 - 0.5 * (T2 + T3); T4 = 0.5 * (1 + T2 / T3); dVbs0mos_dVe = T4 * dVbs0mos_dVe; dVbs0mos_dVd = T4 * dVbs0mos_dVd; /* v4.1 */ if (selfheat) /* dVbs0mos_dT = T4 * dVbs0mos_dT; */ dVbs0mos_dT = dphi_dT - T4 * (dphi_dT - dVbs0mos_dT); /* v4.1 */ else dVbs0mos_dT = 0.0; /* VthFD calculation */ Phis = phi - Vbs0mos; /* dPhis_dVb = -1; LFW_FD not used */ sqrtPhis = sqrt(Phis); dsqrtPhis_dVb = -0.5 / sqrtPhis; Xdep = Xdep0 * sqrtPhis / sqrtPhi; dXdep_dVb = (Xdep0 / sqrtPhi) * dsqrtPhis_dVb; /* v4.2 bugfix temp deriv */ if (selfheat) { dPhis_dT = dphi_dT - dVbs0mos_dT; dsqrtPhis_dT = 0.5 / sqrtPhis * dPhis_dT; dXdep_dT = dXdep0_dT * sqrtPhis / sqrtPhi + Xdep0 * (dsqrtPhis_dT * sqrtPhi - sqrtPhis * dsqrtPhi_dT) / phi; } else { dPhis_dT = 0.0; dsqrtPhis_dT = 0.0; dXdep_dT = 0.0; }/* v4.2 bugfix temp deriv */ T3 = sqrt(Xdep); T0 = pParam->B4SOIdvt2 * Vbs0mos; dT3_dT = 1.0 / (2.0 * T3) * dXdep_dT; /* v4.2 bugfix temp deriv */ dT0_dT = pParam->B4SOIdvt2 * dVbs0mos_dT; /* v4.2 bugfix temp deriv */ if (T0 >= - 0.5) { T1 = 1.0 + T0; dT1_dT = dT0_dT; /* v4.2 bugfix temp deriv */ T2 = pParam->B4SOIdvt2 ; } else /* Added to avoid any discontinuity problems caused by dvt2 */ { T4 = 1.0 / (3.0 + 8.0 * T0); /* T1 = (1.0 + 3.0 * T0) * T4; */ /* v4.2 bugfix temp deriv */ T5 = 1.0 + 3.0 * T0; /* v4.2 bugfix temp deriv */ T1 = T4 * T5; /* v4.2 bugfix temp deriv */ T2 = pParam->B4SOIdvt2 * T4 * T4 ; dT1_dT = T4 * (3.0 - 8.0 * T5 * T4) * dT0_dT; /* v4.2 bugfix temp deriv */ } lt1 = model->B4SOIfactor1 * T3 * T1; dlt1_dVb =model->B4SOIfactor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); dlt1_dT = model->B4SOIfactor1 * ( dT3_dT * T1+ T3 * dT1_dT); /* v4.2 bugfix temp deriv */ T0 = pParam->B4SOIdvt2w * Vbs0mos; dT0_dT = pParam->B4SOIdvt2w * dVbs0mos_dT; /* v4.2 bugfix temp deriv */ if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->B4SOIdvt2w ; dT1_dT = dT0_dT; /* v4.2 bugfix temp deriv */ } else /* Added to 
avoid any discontinuity problems caused by dvt2w */ { T4 = 1.0 / (3.0 + 8.0 * T0); /* T1 = (1.0 + 3.0 * T0) * T4; */ /* v4.2 bugfix temp deriv */ T5 = 1.0 + 3.0 * T0; /* v4.2 bugfix temp deriv */ T1 = T4 * T5; /* v4.2 bugfix temp deriv */ T2 = pParam->B4SOIdvt2w * T4 * T4 ; dT1_dT=T4*(3.0-8.0*T5*T4)*dT0_dT ; /* v4.2 bugfix temp deriv */ } ltw= model->B4SOIfactor1 * T3 * T1; dltw_dVb=model->B4SOIfactor1*(0.5 / T3 * T1 * dXdep_dVb + T3 * T2); dltw_dT=model->B4SOIfactor1 *( dT3_dT * T1+ T3 *dT1_dT);/* v4.2 bugfix temp deriv */ T0 = -0.5 * pParam->B4SOIdvt1 * Leff / lt1; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); Theta0 = T1 * (1.0 + 2.0 * T1); dT1_dVb = -T0 / lt1 * T1 * dlt1_dVb; dTheta0_dVb = (1.0 + 4.0 * T1) * dT1_dVb; dT1_dT = -T0 / lt1 * T1 * dlt1_dT; /* v4.2 bugfix temp deriv */ dTheta0_dT = (1.0 + 4.0 * T1) * dT1_dT; /* v4.2 bugfix temp deriv */ } else { T1 = MIN_EXPL; Theta0 = T1 * (1.0 + 2.0 * T1); dTheta0_dVb = 0.0; dTheta0_dT = 0; /* v4.2 bugfix temp deriv */ } T2 = pParam->B4SOInfactor * epssub / Xdep; dT2_dVb = - T2 / Xdep * dXdep_dVb; dT2_dT = - T2 / Xdep * dXdep_dT; /* v4.2 bugfix temp deriv */ /* T3 = pParam->B4SOIcdsc + pParam->B4SOIcdscb * Vbseff + pParam->B4SOIcdscd * Vds;*/ /* v4.1 */ T3 = pParam->B4SOIcdsc + pParam->B4SOIcdscb * Vbs0mos + pParam->B4SOIcdscd * Vds; dT3_dVb = pParam->B4SOIcdscb; dT3_dVd = pParam->B4SOIcdscd; T4 = (T2 + T3 * Theta0 + pParam->B4SOIcit) / model->B4SOIcox; dT4_dVb = (dT2_dVb + Theta0 * dT3_dVb + dTheta0_dVb * T3) / model->B4SOIcox; dT4_dVd = Theta0 * dT3_dVd / model->B4SOIcox; dT4_dT = (dT2_dT + T3 * dTheta0_dT + pParam->B4SOIcdscb * dVbs0mos_dT * Theta0) / model->B4SOIcox; /* v4.2 bugfix temp deriv */ if (T4 >= -0.5) { n = 1.0 + T4; dn_dVb = dT4_dVb; dn_dVd = dT4_dVd; dn_dT = dT4_dT; /* v4.2 bugfix temp deriv */ } else { /* avoid discontinuity problems caused by T4 */ T0 = 1.0 / (3.0 + 8.0 * T4); /*n = (1.0 + 3.0 * T4) * T0;*/ /* v4.2 bugfix temp deriv */ T5 = 1.0 + 3.0 * T4; /* v4.2 bugfix temp deriv */ n = T0 * T5;/* 
v4.2 bugfix temp deriv */ T0 *= T0; dn_dVb = T0 * dT4_dVb; dn_dVd = T0 * dT4_dVd; dn_dT = T0 * (3.0 - 8.0 * T5 * T0) * dT4_dT; /* v4.2 bugfix temp deriv */ } if (pParam->B4SOIdvtp0 > 0.0) { /* v4.0 */ T0 = -pParam->B4SOIdvtp1 * Vds; if (T0 < -EXPL_THRESHOLD) { T2 = MIN_EXPL; dT2_dVd = 0.0; } else { T2 = exp(T0); dT2_dVd = -pParam->B4SOIdvtp1 * T2; } T3 = Leff + pParam->B4SOIdvtp0 * (1.0 + T2); dT3_dVd = pParam->B4SOIdvtp0 * dT2_dVd; T4 = Vtm * log(Leff / T3); dT4_dVd = -Vtm * dT3_dVd / T3; DITS_Sft = n * T4; dDITS_Sft_dVd = dn_dVd * T4 + n * dT4_dVd; dDITS_Sft_dVb = T4 * dn_dVb; if (selfheat) { /* dDITS_Sft_dT = n * KboQ * log(Leff / T3); *//* v4.2 bugfix temp deriv */ dDITS_Sft_dT = n * KboQ * log(Leff / T3) + dn_dT * T4; /* v4.2 bugfix temp deriv */ } else dDITS_Sft_dT = 0.0; } else { DITS_Sft = dDITS_Sft_dVd = dDITS_Sft_dVb = 0.0; dDITS_Sft_dT = 0.0; } here->B4SOIthetavth = pParam->B4SOIdvt0 * Theta0; Delt_vth = here->B4SOIthetavth * V0; dDelt_vth_dVb = pParam->B4SOIdvt0 * dTheta0_dVb * V0; if (selfheat) /*dDelt_vth_dT = here->B4SOIthetavth * dvbi_dT;*/ /*dDelt_vth_dT = here->B4SOIthetavth * (dvbi_dT - dphi_dT); */ dDelt_vth_dT = pParam->B4SOIdvt0 * (dTheta0_dT * V0 + Theta0 * (dvbi_dT - dphi_dT)); /* v4.2 bugfix temp deriv */ else dDelt_vth_dT = 0.0; T0 = -0.5 * pParam->B4SOIdvt1w * pParam->B4SOIweff * Leff / ltw; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); T2 = T1 * (1.0 + 2.0 * T1); dT1_dVb = -T0 / ltw * T1 * dltw_dVb; dT2_dVb = (1.0 + 4.0 * T1) * dT1_dVb; dT2_dT = -(1.0 + 4.0 * T1) * T1 * T0/ltw * dltw_dT; } else { T1 = MIN_EXPL; T2 = T1 * (1.0 + 2.0 * T1); dT2_dVb = 0.0; dT2_dT = 0; } T0 = pParam->B4SOIdvt0w * T2; DeltVthw = T0 * V0; dDeltVthw_dVb = pParam->B4SOIdvt0w * dT2_dVb * V0; if (selfheat) /* dDeltVthw_dT = T0 * dvbi_dT; */ /* dDeltVthw_dT = T0 * (dvbi_dT - dphi_dT); v4.1 */ /* v4.2 bugfix temp deriv */ dDeltVthw_dT = T0 * (dvbi_dT - dphi_dT) + pParam->B4SOIdvt0w * dT2_dT * V0; /* v4.2 bugfix temp deriv */ else dDeltVthw_dT = 0.0; T0 = sqrt(1.0 + 
pParam->B4SOIlpe0 / Leff); T1 = (pParam->B4SOIkt1 + pParam->B4SOIkt1l / Leff + pParam->B4SOIkt2 * Vbs0mos); /* v4.0 */ /* DeltVthtemp = pParam->B4SOIk1eff * (T0 - 1.0) * sqrtPhi + T1 * TempRatioMinus1; */ DeltVthtemp = pParam->B4SOIk1ox * (T0 - 1.0) * sqrtPhi + T1 * TempRatioMinus1; /* v4.0 end */ if (selfheat) /* dDeltVthtemp_dT = T1 / model->B4SOItnom; */ /* dDeltVthtemp_dT = pParam->B4SOIk1ox * (T0 - 1.0) * dsqrtPhi_dT + T1 / model->B4SOItnom; v4.1 */ /* v4.2 bugfix temp deriv */ dDeltVthtemp_dT = pParam->B4SOIk1ox * (T0 - 1.0) * dsqrtPhi_dT + T1 / model-> B4SOItnom+ pParam->B4SOIkt2 * dVbs0mos_dT* TempRatioMinus1;/* v4.2 bugfix temp deriv */ else dDeltVthtemp_dT = 0.0; tmp2 = toxe * phi / (pParam->B4SOIweff + pParam->B4SOIw0); dtmp2_dT = toxe * dphi_dT / (pParam->B4SOIweff + pParam->B4SOIw0); /* v4.2 bugfix temp deriv */ T3 = here->B4SOIeta0 + pParam->B4SOIetab * Vbs0mos;/*v4.0*/ dT3_dT = pParam->B4SOIetab * dVbs0mos_dT; /*v4.2 temp deriv*/ if (T3 < 1.0e-4) /* avoid discontinuity problems caused by etab */ { T9 = 1.0 / (3.0 - 2.0e4 * T3); T5 = (2.0e-4 - T3); /*v4.2 temp deriv*/ T3 = T5 * T9; /*(2.0e-4 - T3) * T9;*/ /*v4.2 temp deriv*/ T4 = T9 * T9 * pParam->B4SOIetab; dT3_dVb = T4 ; dT3_dT = (2.0e4 * T5 * T9 * T9 - T9) * dT3_dT; /*v4.2 temp deriv*/ } else { dT3_dVb = pParam->B4SOIetab ; } /* DIBL_Sft = T3 * pParam->B4SOItheta0vb0 * Vds; dDIBL_Sft_dVd = pParam->B4SOItheta0vb0 * T3; dDIBL_Sft_dVb = pParam->B4SOItheta0vb0 * Vds * dT3_dVb; */ /* v4.2 bug fix */ DIBL_Sft = T3 * theta0vb0 * Vds; dDIBL_Sft_dVd = theta0vb0 * T3; dDIBL_Sft_dVb = theta0vb0 * Vds * dT3_dVb; dDIBL_Sft_dT = Vds * (dT3_dT * theta0vb0 + T3 * dtheta0vb0_dT); /* v4.2 bug fix */ Lpe_Vb = sqrt(1.0 + pParam->B4SOIlpeb / Leff); /* 4.1 */ T0 = exp(2.0 * pParam->B4SOIdvtp4 * Vds); DITS_Sft2 = pParam->B4SOIdvtp2factor * (T0-1) / (T0+1); dDITS_Sft2_dVd = pParam->B4SOIdvtp2factor * pParam->B4SOIdvtp4 * 4.0 * T0 / ((T0+1) * (T0+1)); VthFD = model->B4SOItype * here->B4SOIvth0 + (pParam->B4SOIk1ox * 
sqrtPhis - pParam->B4SOIk1eff * sqrtPhi) * Lpe_Vb - here->B4SOIk2ox * Vbs0mos- Delt_vth - DeltVthw + (pParam->B4SOIk3 + pParam->B4SOIk3b * Vbs0mos) * tmp2 + DeltVthtemp - DIBL_Sft - DITS_Sft - DITS_Sft2; T6 = pParam->B4SOIk3b * tmp2 - here->B4SOIk2ox + pParam->B4SOIkt2 * TempRatioMinus1; dVthFD_dVb = Lpe_Vb * pParam->B4SOIk1ox * dsqrtPhis_dVb - dDelt_vth_dVb - dDeltVthw_dVb + T6 - dDIBL_Sft_dVb - dDITS_Sft_dVb; /* v4.0 */ /* this is actually dVth_dVbs0mos */ dVthFD_dVe = dVthFD_dVb * dVbs0mos_dVe; /* dVthFD_dVd = -dDIBL_Sft_dVd -dDITS_Sft_dVd; */ /* v4.0 */ dVthFD_dVd = dVthFD_dVb * dVbs0mos_dVd - dDIBL_Sft_dVd - dDITS_Sft_dVd - dDITS_Sft2_dVd; /* v4.1 */ if (selfheat) /* dVthFD_dT = dDeltVthtemp_dT - dDelt_vth_dT - dDeltVthw_dT + dVthFD_dVb * dVbs0mos_dT - dDITS_Sft_dT ; */ /* dVthFD_dT = dDeltVthtemp_dT - dDelt_vth_dT - dDeltVthw_dT + dVthFD_dVb * dVbs0mos_dT - dDITS_Sft_dT + Lpe_Vb * ( pParam->B4SOIk1ox * 0.5 / sqrtPhis * dphi_dT - pParam->B4SOIk1eff * dsqrtPhi_dT); v4.1 */ /* LFW_FD fixed expression */ dVthFD_dT = (pParam->B4SOIk1ox * dsqrtPhis_dT - pParam->B4SOIk1eff * dsqrtPhi_dT) * Lpe_Vb - here->B4SOIk2ox * dVbs0mos_dT - dDelt_vth_dT - dDeltVthw_dT + pParam->B4SOIk3b * dVbs0mos_dT * tmp2 + (pParam->B4SOIk3 + pParam->B4SOIk3b * Vbs0mos) * dtmp2_dT + dDeltVthtemp_dT - dDIBL_Sft_dT - dDITS_Sft_dT; else dVthFD_dT = 0.0; /* VtgseffFD calculation for PhiFD */ VtgsFD = VthFD - Vgs_eff; T10 = model->B4SOInofffd * Vtm; DEXP( ((VtgsFD - model->B4SOIvofffd)/ T10), ExpVtgsFD, T0); VtgseffFD = T10 * log(1.0 + ExpVtgsFD); T0 /= (1.0 + ExpVtgsFD); dVtgseffFD_dVd = T0 * dVthFD_dVd; dVtgseffFD_dVg = -T0 * dVgs_eff_dVg; dVtgseffFD_dVe = T0 * dVthFD_dVe; if (selfheat) /* fix below 1st line of expression - Wagner */ /*dVtgseffFD_dT = T0 * (dVthFD_dT - (VtgsFD - model->B4SOIvofffd)/Temp) */ dVtgseffFD_dT = T0 * (dVthFD_dT - dVgs_eff_dT - (VtgsFD - model->B4SOIvofffd)/Temp) + VtgseffFD/Temp; else dVtgseffFD_dT = 0.0; /* surface potential modeling at strong inversion: PhiON */ 
VgstFD = Vgs_eff - VthFD; DEXP( ((VgstFD - model->B4SOIvofffd)/ T10), ExpVgstFD, T0); VgsteffFD = T10 * log(1.0 + ExpVgstFD); T0 /= (1.0 + ExpVgstFD); dVgsteffFD_dVd = -T0 * dVthFD_dVd; dVgsteffFD_dVg = T0 * dVgs_eff_dVg; dVgsteffFD_dVe = -T0 * dVthFD_dVe; if (selfheat) /* fix below 1st line of expression - Wagner */ /*dVgsteffFD_dT = T0 * (-dVthFD_dT */ dVgsteffFD_dT = T0 * (dVgs_eff_dT - dVthFD_dT - (VgstFD - model->B4SOIvofffd)/Temp) + VgsteffFD/Temp; else dVgsteffFD_dT = 0.0; /* T1 = model->B4SOImoinFD*pParam->B4SOIk1eff*Vtm*Vtm; */ T1 = model->B4SOImoinFD*pParam->B4SOIk1ox*Vtm*Vtm; if (selfheat) dT1_dT = 2*T1/Temp; else dT1_dT=0.0; T2 = VgsteffFD+ 2*pParam->B4SOIk1eff*sqrt(phi); dT2_dVg = dVgsteffFD_dVg; dT2_dVd = dVgsteffFD_dVd; dT2_dVe = dVgsteffFD_dVe; /* if (selfheat) dT2_dT = dVgsteffFD_dT; */ if (selfheat) dT2_dT = dVgsteffFD_dT + 2*pParam->B4SOIk1eff*dsqrtPhi_dT; /* v4.1 */ else dT2_dT = 0.0; T0 = 1+ VgsteffFD * T2 / T1; dT0_dVg = (VgsteffFD * dT2_dVg + T2 * dVgsteffFD_dVg) / T1; dT0_dVd = (VgsteffFD * dT2_dVd + T2 * dVgsteffFD_dVd) / T1; dT0_dVe = (VgsteffFD * dT2_dVe + T2 * dVgsteffFD_dVe) / T1; if (selfheat) dT0_dT = (VgsteffFD * (dT2_dT - T2/T1 * dT1_dT) + T2 * dVgsteffFD_dT) / T1; else dT0_dT = 0.0; PhiON = phi + Vtm* log(T0) ; dPhiON_dVg = Vtm* dT0_dVg/T0 ; dPhiON_dVd = Vtm* dT0_dVd/T0 ; dPhiON_dVe = Vtm* dT0_dVe/T0 ; if (selfheat) dPhiON_dT = dphi_dT + Vtm* dT0_dT/T0 + (PhiON-phi)/Temp ; /* v4.1 */ else dPhiON_dT = 0.0; /* surface potential from subthreshold to inversion: PhiFD */ T0 = model->B4SOIcox / (model->B4SOIcox + 1.0/(1.0/model->B4SOIcsi + 1.0/Cbox)); PhiFD = PhiON - T0 * VtgseffFD; dPhiFD_dVg = dPhiON_dVg - T0 * dVtgseffFD_dVg; dPhiFD_dVd = dPhiON_dVd - T0 * dVtgseffFD_dVd; dPhiFD_dVe = dPhiON_dVe - T0 * dVtgseffFD_dVe; if (selfheat) dPhiFD_dT = dPhiON_dT - T0 * dVtgseffFD_dT; else dPhiFD_dT = 0; /* built-in potential lowering: Vbs0 */ if (model->B4SOIfdMod == 0) /* v4.0 */ { T0 = -model->B4SOIdvbd1 * pParam->B4SOIleff / 
pParam->B4SOIlitl; T1 = model->B4SOIdvbd0 * (exp(0.5*T0) + 2*exp(T0)); T2 = T1 * (vbi - phi); T3 = 0.5 * pParam->B4SOIqsi / model->B4SOIcsi; /* v3.2 */ Vbs0t = PhiFD - T3 + model->B4SOIvbsa + T2; dVbs0t_dVg = dPhiFD_dVg; dVbs0t_dVd = dPhiFD_dVd; dVbs0t_dVe = dPhiFD_dVe; if (selfheat) dVbs0t_dT = dPhiFD_dT + T1 * (dvbi_dT - dphi_dT); /* v4.1 */ else dVbs0t_dT = 0; T0 = 1 + model->B4SOIcsi / Cbox; T3 = -model->B4SOIdk2b * pParam->B4SOIleff / pParam->B4SOIlitl; T5 = model->B4SOIk2b * (exp(0.5*T3) + 2*exp(T3)); T1 = (model->B4SOIk1b - T5) / T0; T2 = T1 * Vesfb; T0 = 1.0/(1 + Cbox / model->B4SOIcsi); Vbs0 = T0 * Vbs0t + T2; dVbs0_dVg = T0 * dVbs0t_dVg; dVbs0_dVd = T0 * dVbs0t_dVd; dVbs0_dVe = T0 * dVbs0t_dVe + T1; if (selfheat) dVbs0_dT = T0 * dVbs0t_dT - T1 * dvfbb_dT; else dVbs0_dT = 0.0; } else /* v4.1 */ { T0 = 1.0/(model->B4SOIcsi + Cbox + model->B4SOIcdsbs); T1 = -model->B4SOIdvbd1 * pParam->B4SOIleff / pParam->B4SOIlitl; T2 = model->B4SOIdvbd0 * (exp(0.5*T1) + 2*exp(T1)); T3 = T2 * (Vds + model->B4SOIvsce); T4 = 0.5 * pParam->B4SOIqsi / model->B4SOIcsi; T5 = model->B4SOIcsi * T0 * (PhiFD - T4 + model->B4SOIvbsa); T6 = model->B4SOIcdsbs * T0 * T3; Vbs0t = T5 + T6; T8 = model->B4SOIcsi * T0; dVbs0t_dVg = T8 * dPhiFD_dVg; dVbs0t_dVd = T8 * dPhiFD_dVd + model->B4SOIcdsbs * T0 * T2; dVbs0t_dVe = T8 * dPhiFD_dVe; if (selfheat) dVbs0t_dT = T8 * dPhiFD_dT; else dVbs0t_dT = 0.0; T7 = Cbox * T0 * Vesfb; Vbs0 = Vbs0t + T7; dVbs0_dVg = dVbs0t_dVg; dVbs0_dVe = dVbs0t_dVe + Cbox * T0; dVbs0_dVd = dVbs0t_dVd; if (selfheat) dVbs0_dT = dVbs0t_dT - Cbox * T0 * dvfbb_dT; else dVbs0_dT = 0.0; } /* set lowerbound of Vbs (from SPICE) to Vbs0: Vbsitf (Vbs at back interface) */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* v3.1 ideal FD: Vbsitf is pinned at Vbs0 */ { Vbs = Vbsitf = Vbs0 + OFF_Vbsitf; dVbsitf_dVg = dVbs0_dVg; dVbsitf_dVd = dVbs0_dVd; dVbsitf_dVe = dVbs0_dVe; /*dVbsitf_dVb = 0.0; */ /*if (selfheat) dVbsitf_dT = dVbs0_dT; */ /*else dVbsitf_dT = 0; */ /* LFW_FD fix */ dVbs_dVg 
= dVbsitf_dVg; dVbs_dVd = dVbsitf_dVd; dVbs_dVb = dVbsitf_dVb = 0.0; dVbs_dVe = dVbsitf_dVe; if (selfheat) {dVbsitf_dT = dVbs0_dT; dVbs_dT = dVbsitf_dT;} else {dVbsitf_dT = 0; dVbs_dT = 0;} } else /* soiMod = 1 */ { T1 = Vbs - (Vbs0 + OFF_Vbsitf) - 0.01; T2 = sqrt(T1*T1 + 0.0001); T3 = 0.5 * (1 + T1/T2); Vbsitf = (Vbs0 + OFF_Vbsitf) + 0.5 * (T1 + T2); dVbsitf_dVg = (1 - T3) * dVbs0_dVg; dVbsitf_dVd = (1 - T3) * dVbs0_dVd; dVbsitf_dVe = (1 - T3) * dVbs0_dVe; dVbsitf_dVb = T3 ; /* LFW_FD 7 new lines */ /* Note that Vbs has not been redefined */ /* dVbs_dVb = dVbsitf_dVb; */ dVbs_dVg = 0.0; dVbs_dVd = 0.0; dVbs_dVb = 1.0; dVbs_dVe = 0.0; dVbs_dT = 0.0; if (selfheat) dVbsitf_dT = (1 - T3) * dVbs0_dT; else dVbsitf_dT = 0.0; } /* Based on Vbsitf, calculate zero-field body potential for MOS: Vbsmos */ T1 = Vbs0t - Vbsitf - 0.005; T2 = sqrt(T1 * T1 + (2.5e-5)); T3 = 0.5 * (T1 + T2); T4 = T3 * model->B4SOIcsi / pParam->B4SOIqsi; /* v3.2 */ Vbsmos = Vbsitf - 0.5 * T3 * T4; T5 = 0.5 * T4 * (1 + T1 / T2); dVbsmos_dVg = dVbsitf_dVg * (1 + T5) - T5 * dVbs0t_dVg; dVbsmos_dVd = dVbsitf_dVd * (1 + T5) - T5 * dVbs0t_dVd; dVbsmos_dVb = dVbsitf_dVb * (1 + T5); dVbsmos_dVe = dVbsitf_dVe * (1 + T5) - T5 * dVbs0t_dVe; if (selfheat) dVbsmos_dT = dVbsitf_dT * (1 + T5) - T5 * dVbs0t_dT; else dVbsmos_dT = 0.0; /* Vbsmos should be used in MOS after some limiting (Vbseff) */ Vbp = Vbs - Vps; dVbp_dVb = 1; } /* end of v3.0 block edition */ /* v3.0 modification */ /* T2 is Vbsmos limited above Vbsc=-5 */ T0 = Vbsmos + 5 - 0.001; T1 = sqrt(T0 * T0 - 0.004 * (-5)); T2 = (-5) + 0.5 * (T0 + T1); dT2_dVb = (0.5 * (1.0 + T0 / T1)) * dVbsmos_dVb; dT2_dVg = (0.5 * (1.0 + T0 / T1)) * dVbsmos_dVg; dT2_dVd = (0.5 * (1.0 + T0 / T1)) * dVbsmos_dVd; dT2_dVe = (0.5 * (1.0 + T0 / T1)) * dVbsmos_dVe; if (selfheat) dT2_dT = (0.5 * (1.0 + T0 / T1)) * dVbsmos_dT; else dT2_dT = 0.0; /* Vbsh is T2 limited below 1.5 */ T0 = 1.5; T1 = T0 - T2 - 0.002; T3 = sqrt(T1 * T1 + 0.008 * T0); Vbsh = T0 - 0.5 * (T1 + T3); 
dVbsh_dVb = 0.5 * (1.0 + T1 / T3) * dT2_dVb; dVbsh_dVg = 0.5 * (1.0 + T1 / T3) * dT2_dVg; dVbsh_dVd = 0.5 * (1.0 + T1 / T3) * dT2_dVd; dVbsh_dVe = 0.5 * (1.0 + T1 / T3) * dT2_dVe; if (selfheat) dVbsh_dT = 0.5 * (1.0 + T1 / T3) * dT2_dT; else dVbsh_dT = 0.0; /* Vbseff is Vbsh limited to 0.95*phi */ T0 = 0.95 * phi; T1 = T0 - Vbsh - 0.002; T2 = sqrt(T1 * T1 + 0.008 * T0); Vbseff = T0 - 0.5 * (T1 + T2); dVbseff_dVb = 0.5 * (1.0 + T1 / T2) * dVbsh_dVb; dVbseff_dVg = 0.5 * (1.0 + T1 / T2) * dVbsh_dVg; dVbseff_dVd = 0.5 * (1.0 + T1 / T2) * dVbsh_dVd; dVbseff_dVe = 0.5 * (1.0 + T1 / T2) * dVbsh_dVe; /* if (selfheat) dVbseff_dT = 0.5 * (1.0 + T1 / T2) * dVbsh_dT; */ if (selfheat) { dT0_dT = 0.95 * dphi_dT; dT1_dT = dT0_dT - dVbsh_dT; dVbseff_dT = dT0_dT - 0.5 * (1.0 + T1 / T2) * dT1_dT - 0.002 * dT0_dT / T2; } /* v4.1 */ else dVbseff_dT = 0.0; here->B4SOIvbseff = Vbseff; /* SPICE sol. */ /* end of v3.0 modification */ /* Below all the variables refer to Vbseff */ /* LFW_FD comment out next 6 lines */ /*if (dVbseff_dVb < 1e-20) { */ /* dVbseff_dVb = 1e-20; */ /* dVbsh_dVb *= 1e20; */ /*} */ /*else */ /* dVbsh_dVb /= dVbseff_dVb; */ /*=======================================================================*/ /* Some derivatives were originally taken w.r.t. Vbseff, and named *_dVb */ /* Later in the code, they were corrected by multiplying or dividing */ /* by dVbseff_dVb. */ /* Now, all derivatives labeled *_dVb are taken w.r.t. Vbs */ /* The correction factor "dVbseff_dVb" has been removed where it is */ /* no longer needed. 
*/ /*=======================================================================*/ Phis = phi - Vbseff; /* dPhis_dVb = -1; LFW_FD not uesed */ sqrtPhis = sqrt(Phis); /*dsqrtPhis_dVb = -0.5 / sqrtPhis; */ /* LFW_FD fix/add 4 lines */ dsqrtPhis_dVg = -0.5 * dVbseff_dVg / sqrtPhis; dsqrtPhis_dVd = -0.5 * dVbseff_dVd / sqrtPhis; dsqrtPhis_dVb = -0.5 * dVbseff_dVb / sqrtPhis; dsqrtPhis_dVe = -0.5 * dVbseff_dVe / sqrtPhis; Xdep = Xdep0 * sqrtPhis / sqrtPhi; /*dXdep_dVb = (Xdep0 / sqrtPhi) * dsqrtPhis_dVb; */ /* LFW_FD fix/add 4 lines */ dXdep_dVg = Xdep0 * dsqrtPhis_dVg / sqrtPhi; dXdep_dVd = Xdep0 * dsqrtPhis_dVd / sqrtPhi; dXdep_dVb = Xdep0 * dsqrtPhis_dVb / sqrtPhi; dXdep_dVe = Xdep0 * dsqrtPhis_dVe / sqrtPhi; /* v4.1 */ if (selfheat) { dPhis_dT = dphi_dT - dVbseff_dT; dsqrtPhis_dT = 0.5 / sqrtPhis * dPhis_dT; /* dXdep_dT = dXdep0_dT * sqrtPhis / sqrtPhi + (dsqrtPhis_dT * sqrtPhi - sqrtPhis * dsqrtPhi_dT) / phi; v4.2 Temp Deriv bugfix */ dXdep_dT = dXdep0_dT * sqrtPhis / sqrtPhi + Xdep0 * (dsqrtPhis_dT * sqrtPhi - sqrtPhis * dsqrtPhi_dT) / phi; } else { dPhis_dT = 0.0; dsqrtPhis_dT = 0.0; dXdep_dT = 0.0; } /* end v4.1 */ /* Calculate nstar v3.2 */ /* here->B4SOInstar = model->B4SOIvtm / Charge_q * */ here->B4SOInstar = Vtm / Charge_q * (model->B4SOIcox + epssub / Xdep + pParam->B4SOIcit); /* Vth Calculation */ T3 = sqrt(Xdep); T0 = pParam->B4SOIdvt2 * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->B4SOIdvt2 ; } else /* Added to avoid any discontinuity problems caused by dvt2 */ { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->B4SOIdvt2 * T4 * T4 ; } lt1 = model->B4SOIfactor1 * T3 * T1; /* dlt1_dVb =model->B4SOIfactor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); */ /* LFW_FD fix/add 4 lines */ dlt1_dVg = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVg + 0.5 * T1 * dXdep_dVg / T3); dlt1_dVd = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVd + 0.5 * T1 * dXdep_dVd / T3); dlt1_dVb = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVb + 0.5 * T1 * dXdep_dVb / 
T3); dlt1_dVe = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVe + 0.5 * T1 * dXdep_dVe / T3); /* fix below expression Wagner */ /*if (selfheat) dlt1_dT = model->B4SOIfactor1 * T1 * 0.5 / T3 * dXdep_dT;*/ if (selfheat) dlt1_dT = model->B4SOIfactor1 * (T1 * 0.5 / T3 * dXdep_dT + T3 * pParam->B4SOIdvt2 * dVbseff_dT); else dlt1_dT = 0.0; /* v4.1 */ T0 = pParam->B4SOIdvt2w * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->B4SOIdvt2w ; } else /* Added to avoid any discontinuity problems caused by dvt2w */ { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->B4SOIdvt2w * T4 * T4 ; } ltw= model->B4SOIfactor1 * T3 * T1; /* dltw_dVb=model->B4SOIfactor1*(0.5 / T3 * T1 * dXdep_dVb + T3 * T2); */ /* LFW_FD fix/add 4 lines */ dltw_dVg = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVg + 0.5 * T1 * dXdep_dVg / T3); dltw_dVd = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVd + 0.5 * T1 * dXdep_dVd / T3); dltw_dVb = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVb + 0.5 * T1 * dXdep_dVb / T3); dltw_dVe = model->B4SOIfactor1 * (T3 * T2 * dVbseff_dVe + 0.5 * T1 * dXdep_dVe / T3); /* fix next expression Wagner */ /*if (selfheat) dltw_dT = model->B4SOIfactor1 * T1 * 0.5 / T3 * dXdep_dT; */ if (selfheat) dltw_dT = model->B4SOIfactor1 * (T1 * 0.5 / T3 * dXdep_dT + T3 * pParam->B4SOIdvt2w * dVbseff_dT); else dltw_dT = 0.0; /* v4.1 */ T0 = -0.5 * pParam->B4SOIdvt1 * Leff / lt1; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); Theta0 = T1 * (1.0 + 2.0 * T1); /*dT1_dVb = -T0 / lt1 * T1 * dlt1_dVb; */ /*dTheta0_dVb = (1.0 + 4.0 * T1) * dT1_dVb; */ /*dT1_dT = -T0 / lt1 * T1 * dlt1_dT; v4.2 bugfix temp deriv */ /*dTheta0_dT = (1.0 + 4.0 * T1) * dT1_dT; v4.2 bugfix temp deriv */ /* LFW_FD fix 5 derivatives */ dTheta0_dVg = -(1.0 + 4.0 * T1) * T1 * T0 * dlt1_dVg / lt1; dTheta0_dVd = -(1.0 + 4.0 * T1) * T1 * T0 * dlt1_dVd / lt1; dTheta0_dVb = -(1.0 + 4.0 * T1) * T1 * T0 * dlt1_dVb / lt1; dTheta0_dVe = -(1.0 + 4.0 * T1) * T1 * T0 * dlt1_dVe / lt1; dTheta0_dT = -(1.0 + 4.0 * T1) * T1 * T0 * 
dlt1_dT / lt1; } else { T1 = MIN_EXPL; Theta0 = T1 * (1.0 + 2.0 * T1); /* LFW_FD fix 5 derivatives */ dTheta0_dVg = 0.0; dTheta0_dVd = 0.0; dTheta0_dVb = 0.0; dTheta0_dVe = 0.0; dTheta0_dT = 0; /* v4.2 bugfix temp deriv */ } /* Calculate n */ T2 = pParam->B4SOInfactor * epssub / Xdep; /* LFW_FD add 3 derivatives */ dT2_dVg = - T2 / Xdep * dXdep_dVg; dT2_dVd = - T2 / Xdep * dXdep_dVd; dT2_dVb = - T2 / Xdep * dXdep_dVb; dT2_dVe = - T2 / Xdep * dXdep_dVe; dT2_dT = - T2 / Xdep * dXdep_dT; /* v4.2 bugfix temp deriv */ T3 = pParam->B4SOIcdsc + pParam->B4SOIcdscb * Vbseff + pParam->B4SOIcdscd * Vds; /* LFW_FD add/fix 5 derivatives */ dT3_dVg = pParam->B4SOIcdscb * dVbseff_dVg; dT3_dVd = pParam->B4SOIcdscb * dVbseff_dVd + pParam->B4SOIcdscd; dT3_dVb = pParam->B4SOIcdscb * dVbseff_dVb; dT3_dVe = pParam->B4SOIcdscb * dVbseff_dVe; dT3_dT = pParam->B4SOIcdscb * dVbseff_dT; /* LFW */ T4 = (T2 + T3 * Theta0 + pParam->B4SOIcit) / model->B4SOIcox; /* LFW_FD add/fix 5 derivatives */ dT4_dVg = (dT2_dVg + T3 * dTheta0_dVg + Theta0 * dT3_dVg) / model->B4SOIcox; dT4_dVd = (dT2_dVd + T3 * dTheta0_dVd + Theta0 * dT3_dVd) / model->B4SOIcox; dT4_dVb = (dT2_dVb + T3 * dTheta0_dVb + Theta0 * dT3_dVb) / model->B4SOIcox; dT4_dVe = (dT2_dVe + T3 * dTheta0_dVe + Theta0 * dT3_dVe) / model->B4SOIcox; dT4_dT = (dT2_dT + dTheta0_dT* T3 + Theta0*dT3_dT)/ model->B4SOIcox; /* LFW */ if (T4 >= -0.5) { n = 1.0 + T4; dn_dVg = dT4_dVg; dn_dVb = dT4_dVb; dn_dVd = dT4_dVd; dn_dVe = dT4_dVe; dn_dT = dT4_dT; /* v4.2 bugfix temp deriv */ } else /* avoid discontinuity problems caused by T4 */ { T0 = 1.0 / (3.0 + 8.0 * T4); /* n = (1.0 + 3.0 * T4) * T0; */ /* v4.2 bugfix temp deriv */ T5 = 1.0 + 3.0 * T4; /* v4.2 bugfix temp deriv */ n = T0 * T5; /* v4.2 bugfix temp deriv */ dn_dT = T0 * (3.0 - 8.0 * T5 * T0) * dT4_dT; /* Wagner - moved line up from 3 lines below */ T0 *= T0; dn_dVg = T0 * dT4_dVg; dn_dVb = T0 * dT4_dVb; dn_dVd = T0 * dT4_dVd; dn_dVe = T0 * dT4_dVe; } /* v4.0 DITS */ if (pParam->B4SOIdvtp0 > 0.0) 
{ T0 = -pParam->B4SOIdvtp1 * Vds; if (T0 < -EXPL_THRESHOLD) { T2 = MIN_EXPL; dT2_dVd = 0.0; } else { T2 = exp(T0); dT2_dVd = -pParam->B4SOIdvtp1 * T2; } T3 = Leff + pParam->B4SOIdvtp0 * (1.0 + T2); dT3_dVd = pParam->B4SOIdvtp0 * dT2_dVd; T4 = Vtm * log(Leff / T3); dT4_dVd = -Vtm * dT3_dVd / T3; DITS_Sft = n * T4; dDITS_Sft_dVd = dn_dVd * T4 + n * dT4_dVd; dDITS_Sft_dVb = T4 * dn_dVb; if (selfheat) { /* dDITS_Sft_dT = n * KboQ * log(Leff / T3); */ /* v4.2 bugfix temp deriv */ dDITS_Sft_dT = n * KboQ * log(Leff / T3) + dn_dT * T4; /* v4.2 bugfix temp deriv */ } else dDITS_Sft_dT = 0.0; } else { DITS_Sft = dDITS_Sft_dVd = dDITS_Sft_dVb = 0.0; dDITS_Sft_dT = 0.0; } here->B4SOIthetavth = pParam->B4SOIdvt0 * Theta0; Delt_vth = here->B4SOIthetavth * V0; /* LFW_FD add/fix 4 derivatives */ dDelt_vth_dVg = pParam->B4SOIdvt0 * dTheta0_dVg * V0; dDelt_vth_dVd = pParam->B4SOIdvt0 * dTheta0_dVd * V0; dDelt_vth_dVb = pParam->B4SOIdvt0 * dTheta0_dVb * V0; dDelt_vth_dVe = pParam->B4SOIdvt0 * dTheta0_dVe * V0; if (selfheat) /* dDelt_vth_dT = here->B4SOIthetavth * dvbi_dT; */ /* v4.2 bugfix temp deriv */ dDelt_vth_dT = pParam->B4SOIdvt0 * (dTheta0_dT * V0 + Theta0 * (dvbi_dT - dphi_dT)); /* v4.2 bugfix temp deriv */ else dDelt_vth_dT = 0.0; T0 = -0.5 * pParam->B4SOIdvt1w * pParam->B4SOIweff * Leff / ltw; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); T2 = T1 * (1.0 + 2.0 * T1); /*dT1_dVb = -T0 / ltw * T1 * dltw_dVb; */ /*dT2_dVb = (1.0 + 4.0 * T1) * dT1_dVb; */ /*dT1_dT = -T0 / ltw * T1 * dltw_dT; v4.2 bugfix temp deriv */ /*dT2_dT = (1.0 + 4.0 * T1) * dT1_dT; v4.2 bugfix temp deriv */ /* LFW_FD add/fix 5 derivatives */ dT2_dVg = -(1.0 + 4.0 * T1) * T1 * T0 * dltw_dVg / ltw; dT2_dVd = -(1.0 + 4.0 * T1) * T1 * T0 * dltw_dVd / ltw; dT2_dVb = -(1.0 + 4.0 * T1) * T1 * T0 * dltw_dVb / ltw; dT2_dVe = -(1.0 + 4.0 * T1) * T1 * T0 * dltw_dVe / ltw; dT2_dT = -(1.0 + 4.0 * T1) * T1 * T0 * dltw_dT / ltw; } else { T1 = MIN_EXPL; T2 = T1 * (1.0 + 2.0 * T1); /* LFW_FD add/fix 5 derivatives */ dT2_dVg = 
0.0; dT2_dVd = 0.0; dT2_dVb = 0.0; dT2_dVe = 0.0; dT2_dT = 0.0; } T0 = pParam->B4SOIdvt0w * T2; DeltVthw = T0 * V0; /* LFW_FD add/fix 5 derivatives */ dDeltVthw_dVg = pParam->B4SOIdvt0w * dT2_dVg * V0; dDeltVthw_dVd = pParam->B4SOIdvt0w * dT2_dVd * V0; dDeltVthw_dVb = pParam->B4SOIdvt0w * dT2_dVb * V0; dDeltVthw_dVe = pParam->B4SOIdvt0w * dT2_dVe * V0; if (selfheat) dDeltVthw_dT = T0 * (dvbi_dT - dphi_dT) + pParam->B4SOIdvt0w * dT2_dT * V0; else dDeltVthw_dT = 0.0; T0 = sqrt(1.0 + pParam->B4SOIlpe0 / Leff); T1 = (pParam->B4SOIkt1 + pParam->B4SOIkt1l / Leff + pParam->B4SOIkt2 * Vbseff); DeltVthtemp = pParam->B4SOIk1ox * (T0 - 1.0) * sqrtPhi + T1 * TempRatioMinus1; /* v4.0 */ /* LFW_FD add/fix 5 derivatives */ dDeltVthtemp_dVg = TempRatioMinus1 * pParam->B4SOIkt2 * dVbseff_dVg; dDeltVthtemp_dVd = TempRatioMinus1 * pParam->B4SOIkt2 * dVbseff_dVd; dDeltVthtemp_dVb = TempRatioMinus1 * pParam->B4SOIkt2 * dVbseff_dVb; dDeltVthtemp_dVe = TempRatioMinus1 * pParam->B4SOIkt2 * dVbseff_dVe; if (selfheat) dDeltVthtemp_dT = pParam->B4SOIk1ox * (T0 - 1.0) * dsqrtPhi_dT + T1 / model-> B4SOItnom + pParam->B4SOIkt2 * TempRatioMinus1 * dVbseff_dT; else dDeltVthtemp_dT = 0.0; tmp2 = toxe * phi / (pParam->B4SOIweff + pParam->B4SOIw0); dtmp2_dT = toxe * dphi_dT / (pParam->B4SOIweff + pParam->B4SOIw0); /* v4.2 bugfix temp deriv */ T3 = here->B4SOIeta0 + pParam->B4SOIetab * Vbseff; if (T3 < 1.0e-4) /* avoid discontinuity problems caused by etab */ { T9 = 1.0 / (3.0 - 2.0e4 * T3); T3 = (2.0e-4 - T3) * T9; T4 = T9 * T9 * pParam->B4SOIetab; /* LFW_FD add/fix 4 derivatives */ dT3_dVg = T4 * dVbseff_dVg; dT3_dVd = T4 * dVbseff_dVd; dT3_dVb = T4 * dVbseff_dVb; dT3_dVe = T4 * dVbseff_dVe; } else { /* LFW_FD add/fix 4 derivatives */ dT3_dVg = pParam->B4SOIetab * dVbseff_dVg; dT3_dVd = pParam->B4SOIetab * dVbseff_dVd; dT3_dVb = pParam->B4SOIetab * dVbseff_dVb; dT3_dVe = pParam->B4SOIetab * dVbseff_dVe; } /* DIBL_Sft = T3 * pParam->B4SOItheta0vb0 * Vds; dDIBL_Sft_dVd = pParam->B4SOItheta0vb0 * T3; 
dDIBL_Sft_dVb = pParam->B4SOItheta0vb0 * Vds * dT3_dVb; v4.2 bugfix */ DIBL_Sft = T3 * theta0vb0 * Vds; /* LFW_FD add/fix 4 derivatives */ dDIBL_Sft_dVg = theta0vb0 * Vds * dT3_dVg; dDIBL_Sft_dVd = theta0vb0 * (Vds * dT3_dVd + T3) ; dDIBL_Sft_dVb = theta0vb0 * Vds * dT3_dVb; dDIBL_Sft_dVe = theta0vb0 * Vds * dT3_dVe; dDIBL_Sft_dT = T3 * Vds * dtheta0vb0_dT + pParam->B4SOIetab * dVbseff_dT * theta0vb0 * Vds; Lpe_Vb = sqrt(1.0 + pParam->B4SOIlpeb / Leff); T9 = 2.2361 / sqrtPhi; sqrtPhisExt = sqrtPhis - T9 * (Vbsh - Vbseff); /* LFW_FD add/fix 4 derivatives */ dsqrtPhisExt_dVg = dsqrtPhis_dVg - T9 * (dVbsh_dVg - dVbseff_dVg); dsqrtPhisExt_dVd = dsqrtPhis_dVd - T9 * (dVbsh_dVd - dVbseff_dVd); dsqrtPhisExt_dVb = dsqrtPhis_dVb - T9 * (dVbsh_dVb - dVbseff_dVb); dsqrtPhisExt_dVe = dsqrtPhis_dVe - T9 * (dVbsh_dVe - dVbseff_dVe); dsqrtPhisExt_dT = dsqrtPhis_dT - T9 * (dVbsh_dT - dVbseff_dT) + 2.2361 * dsqrtPhi_dT * (Vbsh - Vbseff) / phi; /* v4.2 bugfix temp deriv */ /* 4.1 */ T0 = exp(2.0 * pParam->B4SOIdvtp4 * Vds); DITS_Sft2 = pParam->B4SOIdvtp2factor * (T0-1) / (T0+1); dDITS_Sft2_dVd = pParam->B4SOIdvtp2factor * pParam->B4SOIdvtp4 * 4.0 * T0 / ((T0+1) * (T0+1)); Vth = model->B4SOItype * here->B4SOIvth0 + (pParam->B4SOIk1ox * sqrtPhisExt - pParam->B4SOIk1eff * sqrtPhi) * Lpe_Vb - here->B4SOIk2ox * Vbseff- Delt_vth - DeltVthw +(pParam->B4SOIk3 + pParam->B4SOIk3b * Vbseff) * tmp2 + DeltVthtemp - DIBL_Sft - DITS_Sft - DITS_Sft2; /* LFW_FD add/fix 2 derivatives */ dVth_dVg = pParam->B4SOIk1ox * dsqrtPhisExt_dVg * Lpe_Vb - here->B4SOIk2ox * dVbseff_dVg - dDelt_vth_dVg - dDeltVthw_dVg + pParam->B4SOIk3b * dVbseff_dVg * tmp2 + dDeltVthtemp_dVg - dDIBL_Sft_dVg; /* LFW_FD fix line */ dvth0_dT=0; here->B4SOIvon = Vth; T6 = pParam->B4SOIk3b * tmp2 - here->B4SOIk2ox + pParam->B4SOIkt2 * TempRatioMinus1; /* LFW_FD add/fix 4 derivatives */ /* this is actually dVth_dVbseff */ dVth_dVb = pParam->B4SOIk1ox * dsqrtPhisExt_dVb * Lpe_Vb - here->B4SOIk2ox * dVbseff_dVb - dDelt_vth_dVb - 
dDeltVthw_dVb + pParam->B4SOIk3b * dVbseff_dVb * tmp2 + dDeltVthtemp_dVb - dDIBL_Sft_dVb - dDITS_Sft_dVb; dVth_dVd = pParam->B4SOIk1ox * dsqrtPhisExt_dVd * Lpe_Vb - here->B4SOIk2ox * dVbseff_dVd - dDelt_vth_dVd - dDeltVthw_dVd + pParam->B4SOIk3b * dVbseff_dVd * tmp2 + dDeltVthtemp_dVd - dDIBL_Sft_dVd - dDITS_Sft_dVd - dDITS_Sft2_dVd; dVth_dVe = pParam->B4SOIk1ox * dsqrtPhisExt_dVe * Lpe_Vb - here->B4SOIk2ox * dVbseff_dVe - dDelt_vth_dVe - dDeltVthw_dVe + pParam->B4SOIk3b * dVbseff_dVe * tmp2 + dDeltVthtemp_dVe - dDIBL_Sft_dVe; /* LFW_FD fix line */ if (selfheat) /* dVth_dT = dDeltVthtemp_dT - dDelt_vth_dT - dDeltVthw_dT - dDITS_Sft_dT; */ dVth_dT = dDeltVthtemp_dT - dDelt_vth_dT - dDeltVthw_dT +(pParam->B4SOIk1ox * dsqrtPhisExt_dT- pParam->B4SOIk1eff * dsqrtPhi_dT) * Lpe_Vb - here->B4SOIk2ox*dVbseff_dT + pParam->B4SOIk3b*tmp2*dVbseff_dT + (pParam->B4SOIk3 + pParam->B4SOIk3b * Vbseff)*dtmp2_dT + model->B4SOItype * dvth0_dT - dDIBL_Sft_dT - dDITS_Sft_dT; /* v4.2 temp deriv */ else dVth_dT = 0.0; /* dVthzb_dT calculation */ if ((model->B4SOIcapMod == 3) && (selfheat == 1)) { T3zb = sqrt(Xdep0); ltwzb = lt1zb = model->B4SOIfactor1 * T3zb; dT3zb_dT = 1.0 / (2.0 * T3zb) * dXdep0_dT; /* v4.2 bugfix temp deriv */ dltwzb_dT = dlt1zb_dT = model->B4SOIfactor1 * dT3zb_dT; /* v4.2 bugfix temp deriv */ T0 = -0.5 * pParam->B4SOIdvt1 * Leff / lt1zb; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); Theta0zb = T1 * (1.0 + 2.0 * T1); dT0_dT = -(T0 / lt1zb) * dlt1zb_dT; /* v4.2 bugfix temp deriv */ dT1_dT = T1 * dT0_dT; /* v4.2 bugfix temp deriv */ dTheta0zb_dT = (1.0 + 4.0 * T1) * dT1_dT; /* v4.2 bugfix temp deriv */ } else { T1 = MIN_EXPL; Theta0zb = T1 * (1.0 + 2.0 * T1); dTheta0zb_dT=0; /* v4.2 bugfix temp deriv */ } Delt_vthzb = pParam->B4SOIdvt0 * Theta0zb * V0; /* dDelt_vthzb_dT = pParam->B4SOIdvt0 * Theta0zb * dvbi_dT; */ /* v4.2 bugfix temp deriv */ dDelt_vthzb_dT = pParam->B4SOIdvt0 *( Theta0zb * (dvbi_dT - dphi_dT) + dTheta0zb_dT *V0); /* v4.2 bugfix temp deriv */ T0 = -0.5 * 
pParam->B4SOIdvt1w * pParam->B4SOIweff * Leff / ltwzb; if (T0 > -EXPL_THRESHOLD) { T1 = exp(T0); T2 = T1 * (1.0 + 2.0 * T1); dT0_dT = -(T0 / ltwzb) * dltwzb_dT; /* v4.2 bugfix temp deriv */ dT1_dT = T1 * dT0_dT; /* v4.2 bugfix temp deriv */ dT2_dT = (1.0 + 4.0 * T1) * dT1_dT; /* v4.2 bugfix temp deriv */ } else { T1 = MIN_EXPL; T2 = T1 * (1.0 + 2.0 * T1); dT2_dT=0; /* v4.2 bugfix temp deriv */ } T0 = pParam->B4SOIdvt0w * T2; dT0_dT= pParam->B4SOIdvt0w * dT2_dT; /* v4.2 bugfix temp deriv */ DeltVthwzb = T0 * V0; /* dDeltVthwzb_dT = T0 * dvbi_dT; *//* v4.2 bugfix temp deriv */ dDeltVthwzb_dT = ( T0 * (dvbi_dT - dphi_dT)+ dT0_dT *V0); /* v4.2 bugfix temp deriv */ T0 = sqrt(1.0 + pParam->B4SOIlpe0 / Leff); T1 = (pParam->B4SOIkt1 + pParam->B4SOIkt1l / Leff); DeltVthtempzb = pParam->B4SOIk1ox * (T0 - 1.0) * sqrtPhi + T1 * TempRatioMinus1; dDeltVthtempzb_dT = pParam->B4SOIk1ox * (T0 - 1.0) * dsqrtPhi_dT + T1 / model->B4SOItnom; /* v4.2 bugfix temp deriv */ Vthzb = model->B4SOItype * here->B4SOIvth0 - Delt_vthzb - DeltVthwzb + pParam->B4SOIk3 * tmp2 + DeltVthtempzb; dVthzb_dT = model->B4SOItype * dvth0_dT - dDelt_vthzb_dT - dDeltVthwzb_dT + pParam->B4SOIk3 * dtmp2_dT + dDeltVthtempzb_dT; /* v4.2 bugfix temp deriv */ /* Vthzb2 = Vthzb + 1.12; v4.1 */ /* v4.2 never used */ } else /* LFW_FD */ Vthzb = dVthzb_dT = 0.0; /* LFW_FD flexilint */ /* Effective Vgst (Vgsteff) Calculation */ Vgst = Vgs_eff - Vth; dVgst_dVg = dVgs_eff_dVg - dVth_dVg; /* LFW_FD fix derivative */ dVgst_dVd = -dVth_dVd; dVgst_dVb = -dVth_dVb; dVgst_dVe = -dVth_dVe; /* LFW_FD new line */ if (selfheat) { dVgst_dT = dVgs_eff_dT - dVth_dT; } else dVgst_dT = 0.0; T10 = n * Vtm; /* v4.0 */ VgstNVt = pParam->B4SOImstar * Vgst / T10; /* v4.0 */ /* LFW_FD add/fix 4 derivatives */ dVgstNVt_dVg = (pParam->B4SOImstar * dVgst_dVg - VgstNVt * dn_dVg * Vtm) / T10; dVgstNVt_dVd = (pParam->B4SOImstar * dVgst_dVd - VgstNVt * dn_dVd * Vtm) / T10; dVgstNVt_dVb = (pParam->B4SOImstar * dVgst_dVb - VgstNVt * dn_dVb * Vtm) / 
T10; dVgstNVt_dVe = (pParam->B4SOImstar * dVgst_dVe - VgstNVt * dn_dVe * Vtm) / T10; ExpArg = (pParam->B4SOIvoff - (1- pParam->B4SOImstar) * Vgst)/T10; /* LFW_FD */ /* LFW_FD add/fix 4 derivatives */ dExpArg_dVg = (-(1- pParam->B4SOImstar) * dVgst_dVg - ExpArg * dn_dVg * Vtm) / T10; dExpArg_dVd = (-(1- pParam->B4SOImstar) * dVgst_dVd - ExpArg * dn_dVd * Vtm) / T10; dExpArg_dVb = (-(1- pParam->B4SOImstar) * dVgst_dVb - ExpArg * dn_dVb * Vtm) / T10; dExpArg_dVe = (-(1- pParam->B4SOImstar) * dVgst_dVe - ExpArg * dn_dVe * Vtm) / T10; if (selfheat) { dT10_dT = n * dVtm_dT + dn_dT * Vtm; dVgstNVt_dT = -(-pParam->B4SOImstar*dVgst_dT + VgstNVt*dT10_dT)/T10; dExpArg_dT = -(1- pParam->B4SOImstar)*dVgst_dT/T10 -ExpArg*dT10_dT/T10; } else { dT10_dT = 0.0; dVgstNVt_dT = 0.0; dExpArg_dT = 0.0; } /* LFW_FD new line */ dExpVgst_dVg = dExpVgst_dVd = dExpVgst_dVb = dExpVgst_dVe = dExpVgst_dT = 0.0; /* MCJ: Very small Vgst */ if (VgstNVt > EXPL_THRESHOLD) { ExpVgst = 1.0; /* LFW_FD flexilint */ Vgsteff = Vgst; /* T0 is dVgsteff_dVbseff */ T0 = -dVth_dVb; /* LFW_FD add/fix 5 derivatives */ dVgsteff_dVg = dVgst_dVg; dVgsteff_dVd = dVgst_dVd; dVgsteff_dVb = dVgst_dVb; dVgsteff_dVe = dVgst_dVe; if (selfheat) dVgsteff_dT = dVgst_dT ; /* LFW */ else dVgsteff_dT = 0.0; } else if (ExpArg > EXPL_THRESHOLD) { T0 = (Vgst - pParam->B4SOIvoff) / (n * Vtm); ExpVgst = exp(T0); /* LFW_FD add/fix 4 derivatives */ dExpVgst_dVg = (dVgst_dVg - T0 * dn_dVg * Vtm) /(n * Vtm); dExpVgst_dVd = (dVgst_dVd - T0 * dn_dVd * Vtm) /(n * Vtm); dExpVgst_dVb = (dVgst_dVb - T0 * dn_dVb * Vtm) /(n * Vtm); dExpVgst_dVe = (dVgst_dVe - T0 * dn_dVe * Vtm) /(n * Vtm); /*Vgsteff = Vtm * pParam->B4SOIcdep0 / model->B4SOIcox * ExpVgst; *//*v4.2 bug fix */ Vgsteff = Vtm * cdep0 / model->B4SOIcox * ExpVgst; /* v4.2 bug fix */ T3 = Vgsteff / (n * Vtm) ; /* T1 is dVgsteff_dVbseff */ /*T1 = -T3 * (dVth_dVb + T0 * Vtm * dn_dVb);*/ /* LFW_FD fix T1 and 4 derivatives */ T1 = -T3 * ( T0 * Vtm * dn_dVb); dVgsteff_dVg = Vtm * cdep0 / 
model->B4SOIcox * dExpVgst_dVg; dVgsteff_dVd = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVd; dVgsteff_dVb = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVb; dVgsteff_dVe = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVe; /* enhance next if-then-else block - Wagner*/ if (selfheat) { /* dVgsteff_dT = -T3 * (dVth_dT + T0 * dVtm_dT * n) + Vgsteff / Temp+ T1 * dVbseff_dT; v3.0 */ /* v4.2 temp deriv*/ dVgsteff_dT = -T3 * (-dVgst_dT + T0 * dVtm_dT * n + Vtm * dn_dT) + Vgsteff / Temp+ T1 * dVbseff_dT; /*v4.2 temp deriv*/ dTL0_dT = (dVgst_dT - T0 * (dn_dT * Vtm + n * dVtm_dT)) / (n * Vtm); dExpVgst_dT = ExpVgst * dTL0_dT; dVgsteff_dT = Vgsteff * (dVtm_dT/Vtm + dcdep0_dT/cdep0 + dExpVgst_dT/ExpVgst); } else { dExpVgst_dT = 0.0; dVgsteff_dT = 0.0; } } else { ExpVgst = exp(VgstNVt); /* LFW_FD add/fix 4 derivatives */ dExpVgst_dVg = ExpVgst * dVgstNVt_dVg; dExpVgst_dVd = ExpVgst * dVgstNVt_dVd; dExpVgst_dVb = ExpVgst * dVgstNVt_dVb; dExpVgst_dVe = ExpVgst * dVgstNVt_dVe; /* 4 new lines Wagner */ if (selfheat) dExpVgst_dT = ExpVgst * dVgstNVt_dT; else dExpVgst_dT = 0.0; T1 = T10 * log(1.0 + ExpVgst); /* LFW_FD add/fix 4 derivatives */ dT1_dVg = T10 * dExpVgst_dVg / (1.0 + ExpVgst) + T1 * dn_dVg / n; dT1_dVd = T10 * dExpVgst_dVd / (1.0 + ExpVgst) + T1 * dn_dVd / n; dT1_dVb = T10 * dExpVgst_dVb / (1.0 + ExpVgst) + T1 * dn_dVb / n; dT1_dVe = T10 * dExpVgst_dVe / (1.0 + ExpVgst) + T1 * dn_dVe / n; /*T3 = (1.0 / Temp); */ T3 = (1.0 / Temp + dn_dT / n); /* v4.2 temp deriv */ if (selfheat) /* fix below expression Wagner */ /*dT1_dT = -dT1_dVg * (dVth_dT + Vgst * T3) + T1 * T3;*/ dT1_dT = dT10_dT*log(1.0 + ExpVgst) + T10 * dExpVgst_dT / (1.0 + ExpVgst); else dT1_dT = 0.0; /*dT2_dVg = -model->B4SOIcox / (Vtm * pParam->B4SOIcdep0) * exp(ExpArg) * (1 - pParam->B4SOImstar);*/ /*v4.2 bug fix*/ dT2_dVg = -model->B4SOIcox / (Vtm * cdep0) * exp(ExpArg) * (1 - pParam->B4SOImstar); /*v4.2 bug fix*/ T2 = pParam->B4SOImstar - T10 * dT2_dVg / (1.0 - pParam->B4SOImstar); /* LFW_FD fix all 5 T2 derivatives 
*/ TL1 = dT2_dVg; dTL1_dVg = TL1 * dExpArg_dVg; dTL1_dVd = TL1 * dExpArg_dVd; dTL1_dVb = TL1 * dExpArg_dVb; dTL1_dVe = TL1 * dExpArg_dVe; dT2_dVg = -(dn_dVg * Vtm * TL1 + T10 * dTL1_dVg) / (1.0 - pParam->B4SOImstar); dT2_dVd = -(dn_dVd * Vtm * TL1 + T10 * dTL1_dVd) / (1.0 - pParam->B4SOImstar); dT2_dVb = -(dn_dVb * Vtm * TL1 + T10 * dTL1_dVb) / (1.0 - pParam->B4SOImstar); dT2_dVe = -(dn_dVe * Vtm * TL1 + T10 * dTL1_dVe) / (1.0 - pParam->B4SOImstar); if (selfheat) dT2_dT = -(dT10_dT * TL1 +T10*TL1*(-dVtm_dT/Vtm-dcdep0_dT/cdep0+dExpArg_dT) )/(1.0 - pParam->B4SOImstar); else dT2_dT = 0.0; Vgsteff = T1 / T2; T3 = T2 * T2; /* T4 is dVgsteff_dVbseff */ T4 = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; /* LFW_FD fix 4 derivatives */ dVgsteff_dVg = (T2 * dT1_dVg - T1 * dT2_dVg) / T3; dVgsteff_dVd = (T2 * dT1_dVd - T1 * dT2_dVd) / T3; dVgsteff_dVb = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; dVgsteff_dVe = (T2 * dT1_dVe - T1 * dT2_dVe) / T3; if (selfheat) dVgsteff_dT = (T2 * dT1_dT - T1 * dT2_dT) / T3; else dVgsteff_dT = 0.0; } Vgst2Vtm = Vgsteff + 2.0 * Vtm; if (selfheat) dVgst2Vtm_dT = dVgsteff_dT + 2.0 * dVtm_dT; /* v3.1.1 bug fix */ else dVgst2Vtm_dT = 0.0; here->B4SOIVgsteff = Vgsteff; /* v2.2.3 bug fix */ /* v4.0 F-factor (degradation factor due to pocket implant) */ if (pParam->B4SOIfprout <= 0.0) { FP = 1.0; /* LFW_FD enhance line */ dFP_dVg = dFP_dVb = dFP_dVd = dFP_dVe = dFP_dT = 0.0; } else { T9 = pParam->B4SOIfprout * sqrt(Leff) / Vgst2Vtm; FP = 1.0 / (1.0 + T9); /* LFW_FD fix/add 5 derivatives */ dFP_dVg = FP * FP * T9 / Vgst2Vtm * dVgsteff_dVg; dFP_dVb = FP * FP * T9 / Vgst2Vtm * dVgsteff_dVb; dFP_dVd = FP * FP * T9 / Vgst2Vtm * dVgsteff_dVd; dFP_dVe = FP * FP * T9 / Vgst2Vtm * dVgsteff_dVe; if (selfheat) dFP_dT = FP * T9 * dVgst2Vtm_dT / (1.0 + T9) / Vgst2Vtm; else dFP_dT = 0.0; } /* Calculate Effective Channel Geometry */ T9 = sqrtPhis - sqrtPhi; Weff = pParam->B4SOIweff - (2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * Vgsteff + pParam->B4SOIdwb * T9); /* LFW_FD fix/add 4 
derivatives */ dWeff_dVg = -(2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * dVgsteff_dVg + pParam->B4SOIdwb * dsqrtPhis_dVg); dWeff_dVb = -(2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * dVgsteff_dVb + pParam->B4SOIdwb * dsqrtPhis_dVb); dWeff_dVd = -(2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * dVgsteff_dVd + pParam->B4SOIdwb * dsqrtPhis_dVd); dWeff_dVe = -(2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * dVgsteff_dVe + pParam->B4SOIdwb * dsqrtPhis_dVe); /* New - next 5 lines - Wagner */ if (selfheat) dWeff_dT = -(2.0 - here->B4SOInbc) * (pParam->B4SOIdwg * dVgsteff_dT + pParam->B4SOIdwb*(dsqrtPhis_dT - dsqrtPhi_dT)); else dWeff_dT = 0.0; if (Weff < 2.0e-8) /* to avoid the discontinuity problem due to Weff*/ { T0 = 1.0 / (6.0e-8 - 2.0 * Weff); Weff = 2.0e-8 * (4.0e-8 - Weff) * T0; T0 *= T0 * 4.0e-16; dWeff_dVg *= T0; dWeff_dVb *= T0; /* LFW_FD add 2 derivatives */ dWeff_dVd *= T0; dWeff_dVe *= T0; dWeff_dT *= T0; /* new line - Wagner */ } if (model->B4SOIrdsMod == 1) /* v4.0 */ /* LFW_FD enhance line */ Rds = dRds_dVg = dRds_dVb = dRds_dVd = dRds_dVe = dRds_dT = 0.0; else { T0 = pParam->B4SOIprwg * Vgsteff + pParam->B4SOIprwb * T9; /* LFW_FD add 4 derivatives */ dT0_dVg = pParam->B4SOIprwg * dVgsteff_dVg + pParam->B4SOIprwb * dsqrtPhis_dVg; dT0_dVb = pParam->B4SOIprwg * dVgsteff_dVb + pParam->B4SOIprwb * dsqrtPhis_dVb; dT0_dVd = pParam->B4SOIprwg * dVgsteff_dVd + pParam->B4SOIprwb * dsqrtPhis_dVd; dT0_dVe = pParam->B4SOIprwg * dVgsteff_dVe + pParam->B4SOIprwb * dsqrtPhis_dVe; dT0_dT = pParam->B4SOIprwg*dVgsteff_dT + pParam->B4SOIprwb*(dsqrtPhis_dT - dsqrtPhi_dT); /* new expression Wagner */ if (T0 >= -0.9) { Rds = rds0 * (1.0 + T0); /* LFW_FD add/fix 4 derivatives */ dRds_dVg = rds0 * dT0_dVg; dRds_dVb = rds0 * dT0_dVb; dRds_dVd = rds0 * dT0_dVd; dRds_dVe = rds0 * dT0_dVe; if (selfheat && (Rds!=0.0)) /*fix below expression Wagner */ /*dRds_dT = (1.0 + T0) * drds0_dT;*/ dRds_dT = (1.0 + T0) * drds0_dT + rds0 * dT0_dT; else dRds_dT = 0.0; } else /* to avoid the discontinuity 
problem due to prwg and prwb*/ { T1 = 1.0 / (17.0 + 20.0 * T0); Rds = rds0 * (0.8 + T0) * T1; /* LFW_FD add/fix 4 derivatives */ dRds_dVg = (rds0*T1- 20*Rds*T1) * dT0_dVg; dRds_dVb = (rds0*T1- 20*Rds*T1) * dT0_dVb; dRds_dVd = (rds0*T1- 20*Rds*T1) * dT0_dVd; dRds_dVe = (rds0*T1- 20*Rds*T1) * dT0_dVe; if (selfheat && (Rds!=0.0)) /*fix below expression Wagner */ /*dRds_dT = (0.8 + T0) * T1 * drds0_dT;*/ dRds_dT = (0.8 + T0) * T1 * drds0_dT + (rds0*T1- 20*Rds*T1) * dT0_dT; else dRds_dT = 0.0; } /* here->B4SOIrds = Rds; v2.2.3 bug fix */ /* v4.2 bugfix # 39 */ } here->B4SOIrds = Rds / here->B4SOInf; /* LFW_FD fix */ /* Calculate Abulk */ if (pParam->B4SOIa0 == 0.0) { Abulk0 = Abulk = 1.0; /* LFW_FD expand next 3 lines */ dAbulk_dVg = dAbulk_dVb = dAbulk_dVd = dAbulk_dVe = 0.0; dAbulk0_dVg = dAbulk0_dVb = dAbulk0_dVd = dAbulk0_dVe = 0.0; dAbulk0_dT = dAbulk_dT = 0.0; } else { T10 = pParam->B4SOIketa * Vbsh; if (T10 >= -0.9) { T11 = 1.0 / (1.0 + T10); /* LFW_FD add/fix 5 derivatives */ dT11_dVg = -pParam->B4SOIketa * T11 * T11 * dVbsh_dVg; dT11_dVb = -pParam->B4SOIketa * T11 * T11 * dVbsh_dVb; dT11_dVd = -pParam->B4SOIketa * T11 * T11 * dVbsh_dVd; dT11_dVe = -pParam->B4SOIketa * T11 * T11 * dVbsh_dVe; dT11_dT = -pParam->B4SOIketa * T11 * T11 * dVbsh_dT; } else { /* added to avoid the problems caused by Keta */ T12 = 1.0 / (0.8 + T10); T11 = (17.0 + 20.0 * T10) * T12; /* LFW_FD add/fix 5 derivatives */ dT11_dVg = (20.0-T11) * T12 * pParam->B4SOIketa * dVbsh_dVg; dT11_dVb = (20.0-T11) * T12 * pParam->B4SOIketa * dVbsh_dVb; dT11_dVd = (20.0-T11) * T12 * pParam->B4SOIketa * dVbsh_dVd; dT11_dVe = (20.0-T11) * T12 * pParam->B4SOIketa * dVbsh_dVe; dT11_dT = (20.0-T11) * T12 * pParam->B4SOIketa * dVbsh_dT; } /* v3.0 bug fix */ T10 = phi + pParam->B4SOIketas; T13 = (Vbsh * T11) / T10; /* LFW_FD add/fix 5 derivatives */ dT13_dVg = (Vbsh * dT11_dVg + T11 * dVbsh_dVg) / T10; dT13_dVb = (Vbsh * dT11_dVb + T11 * dVbsh_dVb) / T10; dT13_dVd = (Vbsh * dT11_dVd + T11 * dVbsh_dVd) / T10; 
dT13_dVe = (Vbsh * dT11_dVe + T11 * dVbsh_dVe) / T10; dT13_dT = (dVbsh_dT * T11 + Vbsh * dT11_dT - T13 * dphi_dT) / T10; /* limit 1/sqrt(1-T13) to 6, starting at T13=0.96 */ if (T13 < 0.96) { T14 = 1 / sqrt(1-T13); T10 = 0.5 * T14 / (1-T13); /* LFW_FD add/fix 5 derivatives */ dT14_dVg = T10 * dT13_dVg; dT14_dVb = T10 * dT13_dVb; dT14_dVd = T10 * dT13_dVd; dT14_dVe = T10 * dT13_dVe; dT14_dT = T10 * dT13_dT; } else { /* IBM tweak */ T11 = 1.0 / (1.0 - 1.0593220339*T13); T14 = (6.0169491525 - 6.3559322034 * T13) * T11; /* T10 = 0.0179546 * T11 * T11; never used - Wagner */ /* LFW_FD add/fix 5 derivatives */ dT14_dVg = (T14 * 1.0593220339 - 6.3559322034) * T11 * dT13_dVg; dT14_dVb = (T14 * 1.0593220339 - 6.3559322034) * T11 * dT13_dVb; dT14_dVd = (T14 * 1.0593220339 - 6.3559322034) * T11 * dT13_dVd; dT14_dVe = (T14 * 1.0593220339 - 6.3559322034) * T11 * dT13_dVe; dT14_dT = (T14 * 1.0593220339 - 6.3559322034) * T11 * dT13_dT; } /* v3.0 bug fix */ /* T10 = 0.5 * pParam->B4SOIk1eff / sqrt(phi + pParam->B4SOIketas); */ T10 = 0.5 * pParam->B4SOIk1ox * Lpe_Vb / sqrt(phi + pParam->B4SOIketas); /* v4.0 */ T1 = T10 * T14; /* LFW_FD add/fix 4 derivatives */ dT1_dVg = T10 * dT14_dVg; dT1_dVb = T10 * dT14_dVb; dT1_dVd = T10 * dT14_dVd; dT1_dVe = T10 * dT14_dVe; T9 = sqrt(pParam->B4SOIxj * Xdep); tmp1 = Leff + 2.0 * T9; T5 = Leff / tmp1; tmp2 = pParam->B4SOIa0 * T5; tmp3 = pParam->B4SOIweff + pParam->B4SOIb1; tmp4 = pParam->B4SOIb0 / tmp3; T2 = tmp2 + tmp4; /* LFW_FD add/fix 4 derivatives */ dT2_dVg = -tmp2 / tmp1 * pParam->B4SOIxj * dXdep_dVg / T9; dT2_dVb = -tmp2 / tmp1 * pParam->B4SOIxj * dXdep_dVb / T9; dT2_dVd = -tmp2 / tmp1 * pParam->B4SOIxj * dXdep_dVd / T9; dT2_dVe = -tmp2 / tmp1 * pParam->B4SOIxj * dXdep_dVe / T9; T6 = T5 * T5; T7 = T5 * T6; /* LFW_FD add 4 derivatives */ dT7_dVg = -3.0 * T7 / tmp1 * pParam->B4SOIxj * dXdep_dVg / T9; dT7_dVb = -3.0 * T7 / tmp1 * pParam->B4SOIxj * dXdep_dVb / T9; dT7_dVd = -3.0 * T7 / tmp1 * pParam->B4SOIxj * dXdep_dVd / T9; dT7_dVe = -3.0 
* T7 / tmp1 * pParam->B4SOIxj * dXdep_dVe / T9; Abulk0 = 1 + T1 * T2; /* LFW_FD add/fix 4 derivatives */ dAbulk0_dVg = T1 * dT2_dVg + T2 * dT1_dVg; dAbulk0_dVb = T1 * dT2_dVb + T2 * dT1_dVb; dAbulk0_dVd = T1 * dT2_dVd + T2 * dT1_dVd; dAbulk0_dVe = T1 * dT2_dVe + T2 * dT1_dVe; T8 = pParam->B4SOIags * pParam->B4SOIa0 * T7; dAbulk_dVg = -T1 * T8; Abulk = Abulk0 + dAbulk_dVg * Vgsteff; /* LFW_FD add/fix 4 derivatives */ dAbulk_dVg = dAbulk0_dVg + dAbulk_dVg * dVgsteff_dVg - (T1 * pParam->B4SOIags * pParam->B4SOIa0 * dT7_dVg + T8 * dT1_dVg) * Vgsteff; dAbulk_dVb = dAbulk0_dVb - T1 * T8 * dVgsteff_dVb - (T1 * pParam->B4SOIags * pParam->B4SOIa0 * dT7_dVb + T8 * dT1_dVb) * Vgsteff; dAbulk_dVd = dAbulk0_dVd - T1 * T8 * dVgsteff_dVd - (T1 * pParam->B4SOIags * pParam->B4SOIa0 * dT7_dVd + T8 * dT1_dVd) * Vgsteff; dAbulk_dVe = dAbulk0_dVe - T1 * T8 * dVgsteff_dVe - (T1 * pParam->B4SOIags * pParam->B4SOIa0 * dT7_dVe + T8 * dT1_dVe) * Vgsteff; /* 21 new lines Wagner */ /* need temperature derivs of Abulk & Abulk0 */ TL2 = phi + pParam->B4SOIketas; dTL1_dT = -0.5*T10/TL2*dphi_dT; /* TL2 = T14; not used - Wagner */ dTL3_dT = (0.5*pParam->B4SOIxj/T9)*dXdep_dT; dTL4_dT = -2*tmp2*dTL3_dT/tmp1; /* dTL5_dT = -T13*dphi_dT/(phi + pParam->B4SOIketas); not used - Wagner */ /* dTL6_dT = 0.5*T14*dTL5_dT/(1-T13); not used - Wagner */ /* fix line below - Wagner */ /* dTL7_dT = T10*dTL6_dT + T14*dTL1_dT; */ dTL7_dT = T10*dT14_dT + T14*dTL1_dT; dTL8_dT = -pParam->B4SOIags*pParam->B4SOIa0*6*T7*dTL3_dT/tmp1; dTL9_dT = -dTL7_dT*T8 - T1*dTL8_dT; if (selfheat) { dAbulk0_dT = T1*dTL4_dT + T2*dTL7_dT; dAbulk_dT = dAbulk0_dT + dTL9_dT*Vgsteff + dAbulk_dVg*dVgsteff_dT; } else { dAbulk0_dT = 0.0; dAbulk_dT = 0.0; } } if (Abulk0 < 0.01) { T9 = 1.0 / (3.0 - 200.0 * Abulk0); Abulk0 = (0.02 - Abulk0) * T9; /* fix line below - Wagner */ /* dAbulk0_dVb *= T9 * T9; */ T10 = (200.0 * Abulk0 - 1.0) * T9; /* LFW_FD add/fix 5 derivatives */ dAbulk0_dVg *= T10; dAbulk0_dVb *= T10; dAbulk0_dVd *= T10; dAbulk0_dVe *= 
T10; dAbulk0_dT *= T10; } if (Abulk < 0.01) { T9 = 1.0 / (3.0 - 200.0 * Abulk); Abulk = (0.02 - Abulk) * T9; /* fix line below - Wagner */ /* dAbulk_dVb *= T9 * T9; */ /* T10 = T9 * T9; 3.2 bug fix */ T10 = (200.0 * Abulk - 1.0) * T9; /* LFW_FD add/fix 5 derivatives */ dAbulk_dVg *= T10; /* 3.2 bug fix */ dAbulk_dVb *= T10; /* 3.2 bug fix */ dAbulk_dVd *= T10; /* 3.2 bug fix */ dAbulk_dVe *= T10; dAbulk_dT *= T10; } here->B4SOIAbulk = Abulk; /*v3.2 for noise */ /* Mobility calculation */ if (model->B4SOImtrlMod) { /* extend "then" block Wagner */ /*T14 = 2.0 * model->B4SOItype *(model->B4SOIphig - model->B4SOIeasub - 0.5 * Eg + 0.45); toxe_mob = model->B4SOIeot * model->B4SOIepsrsub / 3.9;} Bug fix #4 Jun 09 implementing Eeff correctly*/ T14 = 2.0 * model->B4SOItype *(model->B4SOIphig - model->B4SOIeasub - 0.5 * Eg + 0.45); toxe_mob = model->B4SOIeot * model->B4SOIepsrsub / 3.9; /* Bug fix #4 Jun 09 implementing Eeff correctly*/ /* 3 new lines Wagner */ if (selfheat) dT14_dT = - model->B4SOItype * dEg_dT; else dT14_dT = 0.0;} else { T14 = 0.0; /* extend "else" block Wagner */ /*toxe_mob = model->B4SOItox;}*/ toxe_mob = model->B4SOItox; dT14_dT = 0.0;} /* new line Wagner */ if (model->B4SOImobMod == 1) { T0 = Vgsteff + Vth + Vth - T14; T2 = ua + uc * Vbseff; T3 = T0 / toxe_mob; /* Bug fix #4 Jun 09 implementing Eeff correctly*/ T5 = T3 * (T2 + ub * T3); /* LFW_FD fix 5 derivatives */ dDenomi_dVg = (T2 + 2.0 * ub * T3) / toxe_mob * (dVgsteff_dVg + 2 * dVth_dVg) + T3 * uc * dVbseff_dVg; dDenomi_dVb = (T2 + 2.0 * ub * T3) / toxe_mob * (dVgsteff_dVb + 2 * dVth_dVb) + T3 * uc * dVbseff_dVb; dDenomi_dVd = (T2 + 2.0 * ub * T3) / toxe_mob * (dVgsteff_dVd + 2 * dVth_dVd) + T3 * uc * dVbseff_dVd; dDenomi_dVe = (T2 + 2.0 * ub * T3) / toxe_mob * (dVgsteff_dVe + 2 * dVth_dVe) + T3 * uc * dVbseff_dVe; if (selfheat) dDenomi_dT = (T2 + 2.0 * ub * T3) / toxe_mob * (2 * dVth_dT + dVgsteff_dT - dT14_dT) + (dua_dT + Vbseff * duc_dT + uc * dVbseff_dT + dub_dT * T3 ) * T3; else 
dDenomi_dT = 0.0; } else if (model->B4SOImobMod == 2) /* Bug fix #5 Jun 09 implementing Eeff correctly*/ { T5 = (Vgsteff -T14)/ toxe * (ua /* MobMod=2 does not use Eeff */ + uc * Vbseff + ub * (Vgsteff -T14) /* 'toxe' keeps code consistent with BSIMSOI4.1 Manual*/ / toxe); /* LFW_FD fix 5 derivatives */ dDenomi_dVg = (ua + uc * Vbseff + 2.0 * ub * (Vgsteff -T14) / toxe) / toxe * dVgsteff_dVg + (Vgsteff -T14) /toxe * uc * dVbseff_dVg; dDenomi_dVb = (ua + uc * Vbseff + 2.0 * ub * (Vgsteff -T14) / toxe) / toxe * dVgsteff_dVb + (Vgsteff -T14) /toxe * uc * dVbseff_dVb; dDenomi_dVd = (ua + uc * Vbseff + 2.0 * ub * (Vgsteff -T14) / toxe) / toxe * dVgsteff_dVd + (Vgsteff -T14) /toxe * uc * dVbseff_dVd; dDenomi_dVe = (ua + uc * Vbseff + 2.0 * ub * (Vgsteff -T14) / toxe) / toxe * dVgsteff_dVe + (Vgsteff -T14) / toxe * uc * dVbseff_dVe; if (selfheat) dDenomi_dT = (ua + uc * Vbseff + 2.0 * ub * (Vgsteff -T14) / toxe) / toxe * (dVgsteff_dT-dT14_dT) + (Vgsteff -T14)/ toxe * (dua_dT + Vbseff * duc_dT + uc * dVbseff_dT + dub_dT * (Vgsteff -T14)/ toxe); else dDenomi_dT = 0.0; } else if (model->B4SOImobMod == 3) /* mobMod == 3 */ { T0 = Vgsteff + Vth + Vth - T14; T2 = 1.0 + uc * Vbseff; T3 = T0 / toxe_mob; /* Bug fix #4 Jun 09 implementing Eeff correctly*/ T4 = T3 * (ua + ub * T3); T5 = T4 * T2; /* LFW_FD fix 5 derivatives */ dDenomi_dVg = (ua + 2.0 * ub * T3) * T2 * (dVgsteff_dVg + 2 * dVth_dVg) / toxe_mob + T4 * uc * dVbseff_dVg; dDenomi_dVb = (ua + 2.0 * ub * T3) * T2 * (dVgsteff_dVb + 2 * dVth_dVb) / toxe_mob + T4 * uc * dVbseff_dVb; dDenomi_dVd = (ua + 2.0 * ub * T3) * T2 * (dVgsteff_dVd + 2 * dVth_dVd) / toxe_mob + T4 * uc * dVbseff_dVd; dDenomi_dVe = (ua + 2.0 * ub * T3) * T2 * (dVgsteff_dVe + 2 * dVth_dVe) / toxe_mob + T4 * uc * dVbseff_dVe; if (selfheat) dDenomi_dT = (ua + 2.0 * ub * T3) * T2 * (dVgsteff_dT + 2 * dVth_dT) / toxe_mob + (dua_dT + dub_dT * T3) * T3 * T2 + T4 * (Vbseff * duc_dT + uc * dVbseff_dT); else dDenomi_dT = 0.0; } else /* mobMod == 4 */ { /*universal 
mobility*/ T0 = (Vgsteff + here->B4SOIvtfbphi1)* 1.0e-8 / toxe/6.0; T1 = exp(pParam->B4SOIeu * log(T0)); /* MobMod=4 does not use Eeff */ /* using 'toxe' keeps code consistent with BSIM4 formulation */ /* LFW_FD add/fix 5 derivatives */ dT1_dVg = T1 * pParam->B4SOIeu * 1.0e-8/ T0 / toxe/6.0 * dVgsteff_dVg; dT1_dVb = T1 * pParam->B4SOIeu * 1.0e-8/ T0 / toxe/6.0 * dVgsteff_dVb; dT1_dVd = T1 * pParam->B4SOIeu * 1.0e-8/ T0 / toxe/6.0 * dVgsteff_dVd; dT1_dVe = T1 * pParam->B4SOIeu * 1.0e-8/ T0 / toxe/6.0 * dVgsteff_dVe; dT1_dT = T1 * pParam->B4SOIeu * 1.0e-8/ T0 / toxe/6.0 * dVgsteff_dT; /*T2 = pParam->B4SOIua + pParam->B4SOIuc * Vbseff; */ /* v4.2 bugfix # 35 */ T2 = ua + uc * Vbseff; /*Coulombic*/ /* pParam->B4SOIucs = pParam->B4SOIucs * pow(TempRatio, pParam->B4SOIucste); Bug# 21 Jul09*/ /* pParam->B4SOIud = pParam->B4SOIud * pow(TempRatio, pParam->B4SOIud1) ; Bug# 21 Jul09 */ ucs = pParam->B4SOIucs * pow(TempRatio, pParam->B4SOIucste); ud = pParam->B4SOIud * pow(TempRatio, pParam->B4SOIud1) ; VgsteffVth = here->B4SOIvgsteffvth; /*T10 = exp(pParam->B4SOIucs * log(0.5 + 0.5 * Vgsteff/VgsteffVth));*/ /* T10 = exp(pParam->B4SOIucs * log(1.0 + Vgsteff/VgsteffVth)); Bug# 21 Jul09 */ /* T11 = pParam->B4SOIud/T10; Bug# 21 Jul09 */ T10 = exp(ucs * log(1.0 + Vgsteff/VgsteffVth)); /* Bug Fix # 21 Jul09*/ T11 = ud/T10; /* Bug Fix # 21 Jul09*/ /*dT11_dVg = - 0.5 * pParam->B4SOIucs * T11 /(0.5 + 0.5*Vgsteff/VgsteffVth)/VgsteffVth;*/ /* dT11_dVg = (pParam->B4SOIucs - 1.0)*pParam->B4SOIud/(VgsteffVth* exp((pParam->B4SOIucs-1.0) * log(1.0 + Vgsteff/VgsteffVth))); Bug# 21 Jul09*/ /* LFW_FD add/fix 5 derivatives */ dT11_dVg = - ud * ucs * exp(-(ucs+1.0) * log(1.0 + Vgsteff/VgsteffVth)) * dVgsteff_dVg / VgsteffVth; dT11_dVb = - ud * ucs * exp(-(ucs+1.0) * log(1.0 + Vgsteff/VgsteffVth)) * dVgsteff_dVb / VgsteffVth; dT11_dVd = - ud * ucs * exp(-(ucs+1.0) * log(1.0 + Vgsteff/VgsteffVth)) * dVgsteff_dVd / VgsteffVth; dT11_dVe = - ud * ucs * exp(-(ucs+1.0) * log(1.0 + Vgsteff/VgsteffVth)) * 
dVgsteff_dVe / VgsteffVth; dT11_dT = - ud * ucs * exp(-(ucs+1.0) * log(1.0 + Vgsteff/VgsteffVth)) * dVgsteff_dT / VgsteffVth; T5 = T1 * T2 + T11; /* LFW_FD add/fix 5 derivatives */ dDenomi_dVg = T2 * dT1_dVg + T1 * uc * dVbseff_dVg + dT11_dVg; dDenomi_dVb = T2 * dT1_dVb + T1 * uc * dVbseff_dVb + dT11_dVb; dDenomi_dVd = T2 * dT1_dVd + T1 * uc * dVbseff_dVd + dT11_dVd; dDenomi_dVe = T2 * dT1_dVe + T1 * uc * dVbseff_dVe + dT11_dVe; if (selfheat) dDenomi_dT = T2 * dT1_dT + T1 * (dua_dT + Vbseff * duc_dT + uc * dVbseff_dT) + dT11_dT; else dDenomi_dT = 0.0; } if (T5 >= -0.8) { Denomi = 1.0 + T5; } else /* Added to avoid the discontinuity problem caused by ua and ub*/ { T9 = 1.0 / (7.0 + 10.0 * T5); Denomi = (0.6 + T5) * T9; T9 *= T9; dDenomi_dVg *= T9; dDenomi_dVd *= T9; dDenomi_dVb *= T9; dDenomi_dVe *= T9; /* LFW_FD new line */ if (selfheat) dDenomi_dT *= T9; else dDenomi_dT = 0.0; } here->B4SOIueff = ueff = u0temp / Denomi; T9 = -ueff / Denomi; dueff_dVg = T9 * dDenomi_dVg; dueff_dVd = T9 * dDenomi_dVd; dueff_dVb = T9 * dDenomi_dVb; dueff_dVe = T9 * dDenomi_dVe; /* LFW_FD new line */ if (selfheat) dueff_dT = T9 * dDenomi_dT + du0temp_dT / Denomi; else dueff_dT = 0.0; /* Saturation Drain Voltage Vdsat */ WVCox = Weff * vsattemp * model->B4SOIcox; WVCoxRds = WVCox * Rds; /* LFW_FD add 4 derivatives */ dWVCoxRds_dVg = WVCox * dRds_dVg + Rds * vsattemp * model->B4SOIcox * dWeff_dVg; dWVCoxRds_dVb = WVCox * dRds_dVb + Rds * vsattemp * model->B4SOIcox * dWeff_dVb; dWVCoxRds_dVd = WVCox * dRds_dVd + Rds * vsattemp * model->B4SOIcox * dWeff_dVd; dWVCoxRds_dVe = WVCox * dRds_dVe + Rds * vsattemp * model->B4SOIcox * dWeff_dVe; /* 5 lines new - Wagner */ if (selfheat) dWVCoxRds_dT = model->B4SOIcox * Rds * (vsattemp * dWeff_dT + Weff * dvsattemp_dT) + WVCox * dRds_dT; else dWVCoxRds_dT = 0; /* dWVCoxRds_dT = WVCox * dRds_dT + Weff * model->B4SOIcox * Rds * dvsattemp_dT; */ Esat = 2.0 * vsattemp / ueff; EsatL = Esat * Leff; T0 = -EsatL /ueff; dEsatL_dVg = T0 * dueff_dVg; 
dEsatL_dVd = T0 * dueff_dVd; dEsatL_dVb = T0 * dueff_dVb; dEsatL_dVe = T0 * dueff_dVe; /* LFW_FD new line */ if (selfheat) dEsatL_dT = T0 * dueff_dT + EsatL / vsattemp * dvsattemp_dT; else dEsatL_dT = 0.0; /* Sqrt() */ a1 = pParam->B4SOIa1; if (a1 == 0.0) { Lambda = pParam->B4SOIa2; /* LFW_FD add/fix 5 derivatives */ dLambda_dVg = 0.0; dLambda_dVb = 0.0; dLambda_dVd = 0.0; dLambda_dVe = 0.0; dLambda_dT = 0.0; } else if (a1 > 0.0) /* Added to avoid the discontinuity problem caused by a1 and a2 (Lambda) */ { T0 = 1.0 - pParam->B4SOIa2; T1 = T0 - pParam->B4SOIa1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * T0); Lambda = pParam->B4SOIa2 + T0 - 0.5 * (T1 + T2); /* LFW_FD add/fix 5 derivatives */ dLambda_dVg = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVg; dLambda_dVb = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVb; dLambda_dVd = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVd; dLambda_dVe = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVe; if (selfheat) { dT1_dT = - pParam->B4SOIa1 * dVgsteff_dT; dT2_dT = T1 * dT1_dT / T2; dLambda_dT = -0.5 * (dT1_dT + dT2_dT); } else dLambda_dT = 0.0; } else { T1 = pParam->B4SOIa2 + pParam->B4SOIa1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * pParam->B4SOIa2); Lambda = 0.5 * (T1 + T2); /* LFW_FD add/fix 5 derivatives */ dLambda_dVg = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVg; dLambda_dVb = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVb; dLambda_dVd = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVd; dLambda_dVe = 0.5 * pParam->B4SOIa1 * (1.0 + T1 / T2) * dVgsteff_dVe; if (selfheat) { dT1_dT = pParam->B4SOIa1 * dVgsteff_dT; dT2_dT = T1 * dT1_dT / T2; dLambda_dT = 0.5 * (dT1_dT + dT2_dT); } else dLambda_dT = 0.0; } here->B4SOIAbovVgst2Vtm = Abulk /Vgst2Vtm; /* v2.2.3 bug fix */ if (Rds > 0) { tmp2 = dRds_dVg / Rds + dWeff_dVg / Weff; tmp3 = dRds_dVb / Rds + dWeff_dVb / Weff; } else { tmp2 = dWeff_dVg / Weff; tmp3 = dWeff_dVb / Weff; } if ((Rds == 0.0) && (Lambda == 1.0)) { 
T0 = 1.0 / (Abulk * EsatL + Vgst2Vtm); tmp1 = 0.0; T1 = T0 * T0; T2 = Vgst2Vtm * T0; T3 = EsatL * Vgst2Vtm; Vdsat = T3 * T0; /* LFW_FD add/fix 5 derivatives */ dT0_dVg = -(Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + dVgsteff_dVg) * T1; dT0_dVd = -(Abulk * dEsatL_dVd + EsatL * dAbulk_dVd + dVgsteff_dVd) * T1; dT0_dVb = -(Abulk * dEsatL_dVb + EsatL * dAbulk_dVb + dVgsteff_dVb) * T1; dT0_dVe = -(Abulk * dEsatL_dVe + EsatL * dAbulk_dVe + dVgsteff_dVe) * T1; if (selfheat) dT0_dT = -(Abulk * dEsatL_dT + dVgst2Vtm_dT) * T1; else dT0_dT = 0.0; /* LFW_FD add/fix 5 derivatives */ dVdsat_dVg = T3 * dT0_dVg + T2 * dEsatL_dVg + EsatL * T0 *dVgsteff_dVg; dVdsat_dVb = T3 * dT0_dVb + T2 * dEsatL_dVb + EsatL * T0 *dVgsteff_dVb; dVdsat_dVd = T3 * dT0_dVd + T2 * dEsatL_dVd + EsatL * T0 *dVgsteff_dVd; dVdsat_dVe = T3 * dT0_dVe + T2 * dEsatL_dVe + EsatL * T0 *dVgsteff_dVe; if (selfheat) dVdsat_dT = T3 * dT0_dT + T2 * dEsatL_dT + EsatL * T0 * dVgst2Vtm_dT; else dVdsat_dT = 0.0; } else { tmp1 = dLambda_dVg / (Lambda * Lambda); T9 = Abulk * WVCoxRds; T8 = Abulk * T9; T7 = Vgst2Vtm * T9; T6 = Vgst2Vtm * WVCoxRds; T0 = 2.0 * Abulk * (T9 - 1.0 + 1.0 / Lambda); /* LFW_FD add/fix 4 derivatives */ dT0_dVg = 2.0 * ((2.0 * Abulk * WVCoxRds - 1.0 + 1.0 / Lambda) * dAbulk_dVg + Abulk * Abulk * dWVCoxRds_dVg - Abulk * dLambda_dVg / (Lambda * Lambda)); dT0_dVb = 2.0 * ((2.0 * Abulk * WVCoxRds - 1.0 + 1.0 / Lambda) * dAbulk_dVb + Abulk * Abulk * dWVCoxRds_dVb - Abulk * dLambda_dVb / (Lambda * Lambda)); dT0_dVd = 2.0 * ((2.0 * Abulk * WVCoxRds - 1.0 + 1.0 / Lambda) * dAbulk_dVd + Abulk * Abulk * dWVCoxRds_dVd - Abulk * dLambda_dVd / (Lambda * Lambda)); dT0_dVe = 2.0 * ((2.0 * Abulk * WVCoxRds - 1.0 + 1.0 / Lambda) * dAbulk_dVe + Abulk * Abulk * dWVCoxRds_dVe - Abulk * dLambda_dVe / (Lambda * Lambda)); if (selfheat) { if (Rds!=0.0) tmp4 = dRds_dT / Rds + dvsattemp_dT / vsattemp; else tmp4 = dvsattemp_dT / vsattemp; /*fix below expression Wagner */ /*dT0_dT = 2.0 * T8 * tmp4; */ /*dT0_dT = 2.0 * T8 * tmp4 
*/ /* + 2.0 * dAbulk_dT * (T9-1.0+1.0/Lambda) */ /* + 2.0 * Abulk * (WVCoxRds*dAbulk_dT-dLambda_dT/(Lambda*Lambda)); */ /*fix again below expression Wagner */ dT0_dT = 2.0 * dAbulk_dT * (T9-1.0+1.0/Lambda) + 2.0 * Abulk * (WVCoxRds*dAbulk_dT+Abulk*dWVCoxRds_dT-dLambda_dT/(Lambda*Lambda)); } else tmp4 = dT0_dT = 0.0; T1 = Vgst2Vtm * (2.0 / Lambda - 1.0) + Abulk * EsatL + 3.0 * T7; /* LFW_FD add/fix 4 derivatives */ dT1_dVg = (2.0 / Lambda - 1.0) * dVgsteff_dVg - 2.0 * Vgst2Vtm * dLambda_dVg / (Lambda * Lambda) + EsatL * dAbulk_dVg + Abulk * dEsatL_dVg + 3.0 * (dVgsteff_dVg * Abulk * WVCoxRds + Vgst2Vtm * dAbulk_dVg * WVCoxRds + Vgst2Vtm * Abulk * dWVCoxRds_dVg); dT1_dVb = (2.0 / Lambda - 1.0) * dVgsteff_dVb - 2.0 * Vgst2Vtm * dLambda_dVb / (Lambda * Lambda) + EsatL * dAbulk_dVb + Abulk * dEsatL_dVb + 3.0 * (dVgsteff_dVb * Abulk * WVCoxRds + Vgst2Vtm * dAbulk_dVb * WVCoxRds + Vgst2Vtm * Abulk * dWVCoxRds_dVb); dT1_dVd = (2.0 / Lambda - 1.0) * dVgsteff_dVd - 2.0 * Vgst2Vtm * dLambda_dVd / (Lambda * Lambda) + EsatL * dAbulk_dVd + Abulk * dEsatL_dVd + 3.0 * (dVgsteff_dVd * Abulk * WVCoxRds + Vgst2Vtm * dAbulk_dVd * WVCoxRds + Vgst2Vtm * Abulk * dWVCoxRds_dVd); dT1_dVe = (2.0 / Lambda - 1.0) * dVgsteff_dVe - 2.0 * Vgst2Vtm * dLambda_dVe / (Lambda * Lambda) + EsatL * dAbulk_dVe + Abulk * dEsatL_dVe + 3.0 * (dVgsteff_dVe * Abulk * WVCoxRds + Vgst2Vtm * dAbulk_dVe * WVCoxRds + Vgst2Vtm * Abulk * dWVCoxRds_dVe); /* fix below "if" expresssion - Wagner */ /*if (selfheat) { tmp4 += dVgst2Vtm_dT / Vgst2Vtm; dT1_dT = (2.0 / Lambda - 1.0) * dVgst2Vtm_dT + Abulk * dEsatL_dT + 3.0 * T7 * tmp4; } else dT1_dT = 0.0; */ if (selfheat) dT1_dT = (2.0 / Lambda - 1.0) * dVgst2Vtm_dT - Vgst2Vtm * 2 * dLambda_dT / (Lambda*Lambda) + dAbulk_dT * EsatL + Abulk * dEsatL_dT + 3.0 * Vgst2Vtm * dAbulk_dT * WVCoxRds + 3.0 * dVgst2Vtm_dT * Abulk * WVCoxRds + 3.0 * T7 * tmp4; else dT1_dT = 0.0; T2 = Vgst2Vtm * (EsatL + 2.0 * T6); /* LFW_FD add/fix 4 derivatives */ dT2_dVg = dVgsteff_dVg * (EsatL + 4.0 
* T6) + Vgst2Vtm * (dEsatL_dVg + 2 * Vgst2Vtm * dWVCoxRds_dVg); dT2_dVb = dVgsteff_dVb * (EsatL + 4.0 * T6) + Vgst2Vtm * (dEsatL_dVb + 2 * Vgst2Vtm * dWVCoxRds_dVb); dT2_dVd = dVgsteff_dVd * (EsatL + 4.0 * T6) + Vgst2Vtm * (dEsatL_dVd + 2 * Vgst2Vtm * dWVCoxRds_dVd); dT2_dVe = dVgsteff_dVe * (EsatL + 4.0 * T6) + Vgst2Vtm * (dEsatL_dVe + 2 * Vgst2Vtm * dWVCoxRds_dVe); if (selfheat) /* fix below expression - Wagner */ /*dT2_dT = Vgst2Vtm * dEsatL_dT + EsatL * dVgst2Vtm_dT + 2.0 * T6 * (dVgst2Vtm_dT + Vgst2Vtm * tmp4); */ dT2_dT = dVgst2Vtm_dT * (EsatL + 2.0 * T6) + Vgst2Vtm * (dEsatL_dT + 2.0 * T6 * tmp4 + 2.0 * dVgst2Vtm_dT * WVCoxRds); else dT2_dT = 0.0; T3 = sqrt(T1 * T1 - 2.0 * T0 * T2); Vdsat = (T1 - T3) / T0; dVdsat_dVg = (dT1_dVg - (T1 * dT1_dVg - dT0_dVg * T2 - T0 * dT2_dVg) / T3 - Vdsat * dT0_dVg) / T0; dVdsat_dVb = (dT1_dVb - (T1 * dT1_dVb - dT0_dVb * T2 - T0 * dT2_dVb) / T3 - Vdsat * dT0_dVb) / T0; /* LFW_FD add/fix 2 derivatives */ dVdsat_dVd = (dT1_dVd - (T1 * dT1_dVd - dT0_dVd * T2 - T0 * dT2_dVd) / T3 - Vdsat * dT0_dVd) / T0; dVdsat_dVe = (dT1_dVe - (T1 * dT1_dVe - dT0_dVe * T2 - T0 * dT2_dVe) / T3 - Vdsat * dT0_dVe) / T0; if (selfheat) dVdsat_dT = (dT1_dT - (T1 * dT1_dT - dT0_dT * T2 - T0 * dT2_dT) / T3 - Vdsat * dT0_dT) / T0; else dVdsat_dT = 0.0; } here->B4SOIvdsat = Vdsat; /* Effective Vds (Vdseff) Calculation */ T1 = Vdsat - Vds - pParam->B4SOIdelta; dT1_dVg = dVdsat_dVg; dT1_dVd = dVdsat_dVd - 1.0; dT1_dVb = dVdsat_dVb; dT1_dVe = dVdsat_dVe; /* LFW_FD new line */ dT1_dT = dVdsat_dT; T2 = sqrt(T1 * T1 + 4.0 * pParam->B4SOIdelta * Vdsat); T0 = T1 / T2; T3 = 2.0 * pParam->B4SOIdelta / T2; dT2_dVg = T0 * dT1_dVg + T3 * dVdsat_dVg; dT2_dVd = T0 * dT1_dVd + T3 * dVdsat_dVd; dT2_dVb = T0 * dT1_dVb + T3 * dVdsat_dVb; dT2_dVe = T0 * dT1_dVe + T3 * dVdsat_dVe; /* LFW_FD new line */ if (selfheat) dT2_dT = T0 * dT1_dT + T3 * dVdsat_dT; else dT2_dT = 0.0; Vdseff = Vdsat - 0.5 * (T1 + T2); dVdseff_dVg = dVdsat_dVg - 0.5 * (dT1_dVg + dT2_dVg); dVdseff_dVd = 
dVdsat_dVd - 0.5 * (dT1_dVd + dT2_dVd); dVdseff_dVb = dVdsat_dVb - 0.5 * (dT1_dVb + dT2_dVb); dVdseff_dVe = dVdsat_dVe - 0.5 * (dT1_dVe + dT2_dVe); /* LFW_FD new line */ if (selfheat) dVdseff_dT = dVdsat_dT - 0.5 * (dT1_dT + dT2_dT); else dVdseff_dT = 0.0; if (Vdseff > Vds) Vdseff = Vds; /* This code is added to fixed the problem caused by computer precision when Vds is very close to Vdseff. */ diffVds = Vds - Vdseff; here->B4SOIVdseff = Vdseff; /* v2.2.3 bug fix */ /* Calculate VAsat */ tmp4 = 1.0 - 0.5 * Abulk * Vdsat / Vgst2Vtm; T9 = WVCoxRds * Vgsteff; T8 = T9 / Vgst2Vtm; T0 = EsatL + Vdsat + 2.0 * T9 * tmp4; T7 = 2.0 * WVCoxRds * tmp4; /* LFW_FD fix/add 4 derivatives */ dT0_dVg = dEsatL_dVg + dVdsat_dVg + 2.0 * (tmp4 * (WVCoxRds * dVgsteff_dVg + dWVCoxRds_dVg * Vgsteff) - T9 * (0.5 * (Abulk * dVdsat_dVg + dAbulk_dVg * Vdsat - Abulk * Vdsat * dVgsteff_dVg / Vgst2Vtm) / Vgst2Vtm)); dT0_dVb = dEsatL_dVb + dVdsat_dVb + 2.0 * (tmp4 * (WVCoxRds * dVgsteff_dVb + dWVCoxRds_dVb * Vgsteff) - T9 * (0.5 * (Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat - Abulk * Vdsat * dVgsteff_dVb / Vgst2Vtm) / Vgst2Vtm)); dT0_dVd = dEsatL_dVd + dVdsat_dVd + 2.0 * (tmp4 * (WVCoxRds * dVgsteff_dVd + dWVCoxRds_dVd * Vgsteff) - T9 * (0.5 * (Abulk * dVdsat_dVd + dAbulk_dVd * Vdsat - Abulk * Vdsat * dVgsteff_dVd / Vgst2Vtm) / Vgst2Vtm)); dT0_dVe = dEsatL_dVe + dVdsat_dVe + 2.0 * (tmp4 * (WVCoxRds * dVgsteff_dVe + dWVCoxRds_dVe * Vgsteff) - T9 * (0.5 * (Abulk * dVdsat_dVe + dAbulk_dVe * Vdsat - Abulk * Vdsat * dVgsteff_dVe / Vgst2Vtm) / Vgst2Vtm)); if (selfheat) { if (Rds!=0.0) tmp4 = dRds_dT / Rds + dvsattemp_dT / vsattemp; else tmp4 = dvsattemp_dT / vsattemp; /* fix below expression - Wagner */ /*dT0_dT = dEsatL_dT + dVdsat_dT + T7 * tmp4 * Vgsteff - T8 * (Abulk * dVdsat_dT - Abulk * Vdsat * dVgst2Vtm_dT / Vgst2Vtm); */ dT0_dT = dEsatL_dT + dVdsat_dT + T7 * (dVgsteff_dT + Vgsteff * tmp4) - T9 * (dAbulk_dT * Vdsat + Abulk * dVdsat_dT - Abulk * Vdsat * dVgst2Vtm_dT / Vgst2Vtm) / Vgst2Vtm; } else 
dT0_dT = 0.0; T9 = WVCoxRds * Abulk; T1 = 2.0 / Lambda - 1.0 + T9; /* LFW_FD fix/add 4 derivatives */ dT1_dVg = -2.0 * dLambda_dVg / (Lambda * Lambda) + WVCoxRds * dAbulk_dVg + dWVCoxRds_dVg * Abulk; dT1_dVb = -2.0 * dLambda_dVb / (Lambda * Lambda) + WVCoxRds * dAbulk_dVb + dWVCoxRds_dVb * Abulk; dT1_dVd = -2.0 * dLambda_dVd / (Lambda * Lambda) + WVCoxRds * dAbulk_dVd + dWVCoxRds_dVd * Abulk; dT1_dVe = -2.0 * dLambda_dVe / (Lambda * Lambda) + WVCoxRds * dAbulk_dVe + dWVCoxRds_dVe * Abulk; if (selfheat) /* fix below expression - Wagner */ /*dT1_dT = T9 * tmp4;*/ dT1_dT = - 2.0 * dLambda_dT / (Lambda*Lambda) /* + T9 * tmp4 + WVCoxRds * dAbulk_dT; fix again */ + WVCoxRds * dAbulk_dT + dWVCoxRds_dT * Abulk; else dT1_dT = 0.0; Vasat = T0 / T1; dVasat_dVg = (dT0_dVg - Vasat * dT1_dVg) / T1; dVasat_dVb = (dT0_dVb - Vasat * dT1_dVb) / T1; /* LFW_FD fix/add 2 derivatives */ dVasat_dVd = (dT0_dVd - Vasat * dT1_dVd) / T1; dVasat_dVe = (dT0_dVe - Vasat * dT1_dVe) / T1; if (selfheat) dVasat_dT = (dT0_dT - Vasat * dT1_dT) / T1; else dVasat_dT = 0.0; /* Calculate VACLM */ if ((pParam->B4SOIpclm > 0.0) && (diffVds > 1.0e-10)) { T0 = 1.0 / (pParam->B4SOIpclm * Abulk * pParam->B4SOIlitl); dT0_dVb = -T0 / Abulk * dAbulk_dVb; dT0_dVg = -T0 / Abulk * dAbulk_dVg; /* LFW_FD add 2 derivatives */ dT0_dVd = -T0 / Abulk * dAbulk_dVd; dT0_dVe = -T0 / Abulk * dAbulk_dVe; T2 = Vgsteff / EsatL; T1 = Leff * (Abulk + T2); /* LFW_FD add/fix 4 derivatives */ dT1_dVg = Leff * (dAbulk_dVg + (dVgsteff_dVg - T2 * dEsatL_dVg) / EsatL); dT1_dVb = Leff * (dAbulk_dVb + (dVgsteff_dVb - T2 * dEsatL_dVb) / EsatL); dT1_dVd = Leff * (dAbulk_dVd + (dVgsteff_dVd - T2 * dEsatL_dVd) / EsatL); dT1_dVe = Leff * (dAbulk_dVe + (dVgsteff_dVe - T2 * dEsatL_dVe) / EsatL); /* fix below expression - Wagner */ /*if (selfheat) dT1_dT = -T2 * dEsatL_dT / Esat; */ if (selfheat) dT1_dT = Leff * (dAbulk_dT + (dVgsteff_dT - T2 * dEsatL_dT) / EsatL); else dT1_dT = 0.0; T9 = T0 * T1; VACLM = T9 * diffVds; dVACLM_dVg = T0 * dT1_dVg * 
diffVds - T9 * dVdseff_dVg + T1 * diffVds * dT0_dVg; dVACLM_dVb = (dT0_dVb * T1 + T0 * dT1_dVb) * diffVds - T9 * dVdseff_dVb; /* LFW_FD add/fix 2 derivatives */ dVACLM_dVd = (dT0_dVd * T1 + T0 * dT1_dVd) * diffVds + T9 * (1.0 - dVdseff_dVd); dVACLM_dVe = (dT0_dVe * T1 + T0 * dT1_dVe) * diffVds - T9 * dVdseff_dVe; if (selfheat) /* fix below expression - Wagner */ /*dVACLM_dT = T0 * dT1_dT * diffVds - T9 * dVdseff_dT;*/ dVACLM_dT = - T9 * dVdseff_dT + diffVds * (T0 * dT1_dT - T1 * T0 * dAbulk_dT / Abulk); else dVACLM_dT = 0.0; } else { VACLM = MAX_EXPL; dVACLM_dVd = dVACLM_dVg = dVACLM_dVb = dVACLM_dVe = dVACLM_dT = 0.0; /* LFW_FD expand line */ } /* Calculate VADIBL */ /* if (pParam->B4SOIthetaRout > 0.0) */ /* v4.2 bugfix # 36 */ if (thetaRout > 0.0) { T8 = Abulk * Vdsat; T0 = Vgst2Vtm * T8; T1 = Vgst2Vtm + T8; /* LFW_FD fix/add 4 derivatives */ dT0_dVg = T8 * dVgsteff_dVg + Vgst2Vtm * (Abulk * dVdsat_dVg + dAbulk_dVg * Vdsat); dT0_dVb = T8 * dVgsteff_dVb + Vgst2Vtm * (Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat); dT0_dVd = T8 * dVgsteff_dVd + Vgst2Vtm * (Abulk * dVdsat_dVd + dAbulk_dVd * Vdsat); dT0_dVe = T8 * dVgsteff_dVe + Vgst2Vtm * (Abulk * dVdsat_dVe + dAbulk_dVe * Vdsat); /* LFW_FD fix/add 4 derivatives */ dT1_dVg = dVgsteff_dVg + Abulk * dVdsat_dVg + dAbulk_dVg * Vdsat; dT1_dVb = dVgsteff_dVb + Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat; dT1_dVd = dVgsteff_dVd + Abulk * dVdsat_dVd + dAbulk_dVd * Vdsat; dT1_dVe = dVgsteff_dVe + Abulk * dVdsat_dVe + dAbulk_dVe * Vdsat; if (selfheat) { /* fix below expression - Wagner */ /*dT0_dT = dVgst2Vtm_dT * T8 + Abulk * Vgst2Vtm * dVdsat_dT;*/ dT0_dT = dVgst2Vtm_dT * T8 + Vgst2Vtm * dAbulk_dT * Vdsat + Vgst2Vtm * Abulk * dVdsat_dT; /* fix below expression - Wagner */ /*dT1_dT = dVgst2Vtm_dT + Abulk * dVdsat_dT;*/ dT1_dT = dVgst2Vtm_dT + dAbulk_dT * Vdsat + Abulk * dVdsat_dT; } else dT0_dT = dT1_dT = 0.0; T9 = T1 * T1; /*T2 = pParam->B4SOIthetaRout; */ /* v4.2 bugfix # 36 */ T2 = thetaRout; VADIBL = (Vgst2Vtm - T0 / T1) / T2; /* 
LFW_FD fix/add 4 derivatives */ dVADIBL_dVg = (dVgsteff_dVg - (dT0_dVg - T0 * dT1_dVg /T1 )/T1) / T2; dVADIBL_dVb = (dVgsteff_dVb - (dT0_dVb - T0 * dT1_dVb /T1 )/T1) / T2; dVADIBL_dVd = (dVgsteff_dVd - (dT0_dVd - T0 * dT1_dVd /T1 )/T1) / T2; dVADIBL_dVe = (dVgsteff_dVe - (dT0_dVe - T0 * dT1_dVe /T1 )/T1) / T2; if (selfheat) /*fix below expression Wagner */ /*dVADIBL_dT = (dVgst2Vtm_dT - dT0_dT/T1 + T0*dT1_dT/T9) / T2;*/ dVADIBL_dT = (dVgst2Vtm_dT - dT0_dT/T1 + T0*dT1_dT/T9) / T2 - VADIBL * dthetaRout_dT / T2; else dVADIBL_dT = 0.0; T7 = pParam->B4SOIpdiblb * Vbseff; if (T7 >= -0.9) { T3 = 1.0 / (1.0 + T7); VADIBL *= T3; /* LFW_FD fix/add 4 derivatives */ dVADIBL_dVg = (dVADIBL_dVg - VADIBL * pParam->B4SOIpdiblb * dVbseff_dVg) * T3; dVADIBL_dVb = (dVADIBL_dVb - VADIBL * pParam->B4SOIpdiblb * dVbseff_dVb) * T3; dVADIBL_dVd = (dVADIBL_dVd - VADIBL * pParam->B4SOIpdiblb * dVbseff_dVd) * T3; dVADIBL_dVe = (dVADIBL_dVe - VADIBL * pParam->B4SOIpdiblb * dVbseff_dVe) * T3; /*fix below expression Wagner */ /*if (selfheat) dVADIBL_dT *= T3;*/ if (selfheat) dVADIBL_dT = T3 * dVADIBL_dT - VADIBL*pParam->B4SOIpdiblb*dVbseff_dT/(1.0+T7); else dVADIBL_dT = 0.0; } else /* Added to avoid the discontinuity problem caused by pdiblcb */ { T4 = 1.0 / (0.8 + T7); T3 = (17.0 + 20.0 * T7) * T4; /* LFW_FD fix/add 4 derivatives */ dVADIBL_dVg = dVADIBL_dVg * T3 + VADIBL * (20.0 - T3) * T4 * pParam->B4SOIpdiblb * dVbseff_dVg; dVADIBL_dVb = dVADIBL_dVb * T3 + VADIBL * (20.0 - T3) * T4 * pParam->B4SOIpdiblb * dVbseff_dVb; dVADIBL_dVd = dVADIBL_dVd * T3 + VADIBL * (20.0 - T3) * T4 * pParam->B4SOIpdiblb * dVbseff_dVd; dVADIBL_dVe = dVADIBL_dVe * T3 + VADIBL * (20.0 - T3) * T4 * pParam->B4SOIpdiblb * dVbseff_dVe; /*fix below expression Wagner */ /*if (selfheat) dVADIBL_dT *= T3;*/ if (selfheat) dVADIBL_dT = T3 * dVADIBL_dT + VADIBL * (20.0*T4 - T3/(0.8 + T7)) * pParam->B4SOIpdiblb*dVbseff_dT; else dVADIBL_dT = 0.0; VADIBL *= T3; } } else { VADIBL = MAX_EXPL; dVADIBL_dVd = dVADIBL_dVg = dVADIBL_dVb 
= dVADIBL_dVe = dVADIBL_dT = 0.0; /* LFW_FD enhance line */ } /* v4.0 DITS */ T0 = pParam->B4SOIpditsd * Vds; if (T0 > EXPL_THRESHOLD) { T1 = MAX_EXPL; dT1_dVd = 0; } else { T1 = exp(T0); dT1_dVd = T1 * pParam->B4SOIpditsd; } if (pParam->B4SOIpdits > MIN_EXPL) { T2 = 1.0 + model->B4SOIpditsl * Leff; VADITS = (1.0 + T2 * T1) / pParam->B4SOIpdits; dVADITS_dVg = VADITS * dFP_dVg; /* LFW_FD fix/add 3 derivatives */ dVADITS_dVd = VADITS * dFP_dVd + FP * T2 * dT1_dVd / pParam->B4SOIpdits; dVADITS_dVb = VADITS * dFP_dVb; dVADITS_dVe = VADITS * dFP_dVe; VADITS *= FP; if (selfheat) dVADITS_dT = VADITS * dFP_dT / FP; else dVADITS_dT = 0.0; } else { VADITS = MAX_EXPL; dVADITS_dVg = dVADITS_dVd = dVADITS_dVb = dVADITS_dVe = dVADITS_dT = 0; /* LFW_FD enhance line */ } /* Calculate VA */ T8 = pParam->B4SOIpvag / EsatL; T9 = T8 * Vgsteff; if (T9 > -0.9) { T0 = 1.0 + T9; /* LFW_FD fix/add 4 derivatives */ dT0_dVg = T8 * dVgsteff_dVg - T9 * dEsatL_dVg / EsatL; dT0_dVb = T8 * dVgsteff_dVb - T9 * dEsatL_dVb / EsatL; dT0_dVd = T8 * dVgsteff_dVd - T9 * dEsatL_dVd / EsatL; dT0_dVe = T8 * dVgsteff_dVe - T9 * dEsatL_dVe / EsatL; if (selfheat) /* fix below expression - Wagner */ /*dT0_dT = -T9 * dEsatL_dT / EsatL;*/ dT0_dT = T8 * dVgsteff_dT - T9 * dEsatL_dT / EsatL; else dT0_dT = 0.0; } else /* Added to avoid the discontinuity problems caused by pvag */ { TL1 = T1 = 1.0 / (17.0 + 20.0 * T9); /* change LHS name - Wagner */ T0 = (0.8 + T9) * T1; T1 *= T1; T9 *= T1 / EsatL; /* LFW_FD fix/add 4 derivatives */ dT0_dVg = (1.0 - 20.0 * T0) * TL1 * (T8 * dVgsteff_dVg - T9 * dEsatL_dVg / EsatL); dT0_dVb = (1.0 - 20.0 * T0) * TL1 * (T8 * dVgsteff_dVb - T9 * dEsatL_dVb / EsatL); dT0_dVd = (1.0 - 20.0 * T0) * TL1 * (T8 * dVgsteff_dVd - T9 * dEsatL_dVd / EsatL); dT0_dVe = (1.0 - 20.0 * T0) * TL1 * (T8 * dVgsteff_dVe - T9 * dEsatL_dVe / EsatL); if (selfheat) /* fix below expression - Wagner */ /*dT0_dT = -T9 * dEsatL_dT;*/ dT0_dT = TL1 * (1.0 - 20.0 * T0) * (T8 * dVgsteff_dT - T8 * Vgsteff * dEsatL_dT 
/ EsatL); else dT0_dT = 0.0; } tmp1 = VACLM * VACLM; tmp2 = VADIBL * VADIBL; tmp3 = VACLM + VADIBL; T1 = VACLM * VADIBL / tmp3; tmp3 *= tmp3; dT1_dVg = (tmp1 * dVADIBL_dVg + tmp2 * dVACLM_dVg) / tmp3; dT1_dVd = (tmp1 * dVADIBL_dVd + tmp2 * dVACLM_dVd) / tmp3; dT1_dVb = (tmp1 * dVADIBL_dVb + tmp2 * dVACLM_dVb) / tmp3; dT1_dVe = (tmp1 * dVADIBL_dVe + tmp2 * dVACLM_dVe) / tmp3; /* LFW_FD new line */ if (selfheat) /*fix below expression - Wagner */ /*dT1_dT = (tmp1 * dVADIBL_dT + tmp2 * dVACLM_dT ) / tmp3;*/ dT1_dT = (dVACLM_dT * VADIBL + VACLM * dVADIBL_dT - T1 * (dVACLM_dT + dVADIBL_dT))/ (VACLM + VADIBL); else dT1_dT = 0.0; /* v4.0 adding DITS */ tmp1 = T1 * T1; tmp2 = VADITS * VADITS; tmp3 = T1 + VADITS; T2 = T1 * VADITS / tmp3; tmp3 *= tmp3; dT2_dVg = (tmp1 * dVADITS_dVg + tmp2 * dT1_dVg) / tmp3; dT2_dVd = (tmp1 * dVADITS_dVd + tmp2 * dT1_dVd) / tmp3; /* LFW_FD fix/add 2 derivatives */ dT2_dVb = (tmp1 * dVADITS_dVb + tmp2 * dT1_dVb) / tmp3; dT2_dVe = (tmp1 * dVADITS_dVe + tmp2 * dT1_dVe) / tmp3; if (selfheat) /*fix below expression - Wagner */ /*dT2_dT = (tmp1 * dVADITS_dT + tmp2 * dT1_dT ) / tmp3;*/ dT2_dT = (dT1_dT * VADITS + T1 * dVADITS_dT - T2 * (dT1_dT + dVADITS_dT))/(T1 + VADITS); else dT2_dT = 0.0; /* Va = Vasat + T0 * T1; dVa_dVg = dVasat_dVg + T1 * dT0_dVg + T0 * dT1_dVg; dVa_dVd = dVasat_dVd + T1 * dT0_dVd + T0 * dT1_dVd; dVa_dVb = dVasat_dVb + T1 * dT0_dVb + T0 * dT1_dVb; if (selfheat) dVa_dT = dVasat_dT + T1 * dT0_dT + T0 * dT1_dT; else dVa_dT = 0.0; */ /* v4.0 */ Va = Vasat + T0 * T2; dVa_dVg = dVasat_dVg + T2 * dT0_dVg + T0 * dT2_dVg; dVa_dVd = dVasat_dVd + T2 * dT0_dVd + T0 * dT2_dVd; dVa_dVb = dVasat_dVb + T2 * dT0_dVb + T0 * dT2_dVb; dVa_dVe = dVasat_dVe + T2 * dT0_dVe + T0 * dT2_dVe; /* LFW_FD new line */ if (selfheat) dVa_dT = dVasat_dT + T2 * dT0_dT + T0 * dT2_dT; else dVa_dT = 0.0; /* Calculate Ids */ CoxWovL = model->B4SOIcox * Weff / Leff; beta = ueff * CoxWovL; dbeta_dVg = CoxWovL * dueff_dVg + beta * dWeff_dVg / Weff ; /* LFW_FD fix/add 3 
derivatives */ dbeta_dVd = CoxWovL * dueff_dVd + beta * dWeff_dVd / Weff ; dbeta_dVb = CoxWovL * dueff_dVb + beta * dWeff_dVb / Weff ; dbeta_dVe = CoxWovL * dueff_dVe + beta * dWeff_dVe / Weff ; /* fix below if expresssion - Wagner */ /*if (selfheat) dbeta_dT = CoxWovL * dueff_dT; */ if (selfheat) dbeta_dT = CoxWovL * dueff_dT + beta * dWeff_dT / Weff ; else dbeta_dT = 0.0; T0 = 1.0 - 0.5 * Abulk * Vdseff / Vgst2Vtm; /* LFW_FD fix/add 4 derivatives */ dT0_dVg = -0.5 * (Abulk * dVdseff_dVg + dAbulk_dVg * Vdseff -Abulk * Vdseff * dVgsteff_dVg / Vgst2Vtm) / Vgst2Vtm; dT0_dVb = -0.5 * (Abulk * dVdseff_dVb + dAbulk_dVb * Vdseff -Abulk * Vdseff * dVgsteff_dVb / Vgst2Vtm) / Vgst2Vtm; dT0_dVd = -0.5 * (Abulk * dVdseff_dVd + dAbulk_dVd * Vdseff -Abulk * Vdseff * dVgsteff_dVd / Vgst2Vtm) / Vgst2Vtm; dT0_dVe = -0.5 * (Abulk * dVdseff_dVe + dAbulk_dVe * Vdseff -Abulk * Vdseff * dVgsteff_dVe / Vgst2Vtm) / Vgst2Vtm; if (selfheat) /* fix first line of below expression - Wagner */ /*dT0_dT = -0.5 * (Abulk * dVdseff_dT */ dT0_dT = -0.5 * (Abulk * dVdseff_dT + dAbulk_dT * Vdseff - Abulk * Vdseff / Vgst2Vtm * dVgst2Vtm_dT) / Vgst2Vtm; else dT0_dT = 0.0; fgche1 = Vgsteff * T0; /* LFW_FD fix/add 4 derivatives */ dfgche1_dVg = Vgsteff * dT0_dVg + dVgsteff_dVg * T0; dfgche1_dVb = Vgsteff * dT0_dVb + dVgsteff_dVb * T0; dfgche1_dVd = Vgsteff * dT0_dVd + dVgsteff_dVd * T0; dfgche1_dVe = Vgsteff * dT0_dVe + dVgsteff_dVe * T0; /* fix below expression - Wagner */ /*if (selfheat) dfgche1_dT = Vgsteff * dT0_dT;*/ if (selfheat) dfgche1_dT = Vgsteff * dT0_dT + T0 * dVgsteff_dT; else dfgche1_dT = 0.0; T9 = Vdseff / EsatL; fgche2 = 1.0 + T9; dfgche2_dVg = (dVdseff_dVg - T9 * dEsatL_dVg) / EsatL; dfgche2_dVd = (dVdseff_dVd - T9 * dEsatL_dVd) / EsatL; dfgche2_dVb = (dVdseff_dVb - T9 * dEsatL_dVb) / EsatL; dfgche2_dVe = (dVdseff_dVe - T9 * dEsatL_dVe) / EsatL; /* LFW_FD new line */ if (selfheat) dfgche2_dT = (dVdseff_dT - T9 * dEsatL_dT) / EsatL; else dfgche2_dT = 0.0; gche = beta * fgche1 / fgche2; 
dgche_dVg = (beta * dfgche1_dVg + fgche1 * dbeta_dVg - gche * dfgche2_dVg) / fgche2; dgche_dVd = (beta * dfgche1_dVd + fgche1 * dbeta_dVd - gche * dfgche2_dVd) / fgche2; dgche_dVb = (beta * dfgche1_dVb + fgche1 * dbeta_dVb - gche * dfgche2_dVb) / fgche2; /* LFW_FD add 1 derivative */ dgche_dVe = (beta * dfgche1_dVe + fgche1 * dbeta_dVe - gche * dfgche2_dVe) / fgche2; if (selfheat) dgche_dT = (beta * dfgche1_dT + fgche1 * dbeta_dT - gche * dfgche2_dT) / fgche2; else dgche_dT = 0.0; T0 = 1.0 + gche * Rds; T9 = Vdseff / T0; Idl = gche * T9; IdlovVdseff = gche / T0; /* Whoa, these formulas for the derivatives of Idl are convoluted, but I verified them to be correct */ dIdl_dVg = (gche * dVdseff_dVg + T9 * dgche_dVg) / T0 - Idl * gche / T0 * dRds_dVg ; /* LFW_FD fix/add 3 derivatives */ dIdl_dVd = (gche * dVdseff_dVd + T9 * dgche_dVd - Idl * dRds_dVd * gche) / T0; dIdl_dVb = (gche * dVdseff_dVb + T9 * dgche_dVb - Idl * dRds_dVb * gche) / T0; dIdl_dVe = (gche * dVdseff_dVe + T9 * dgche_dVe - Idl * dRds_dVe * gche) / T0; if (selfheat) dIdl_dT = (gche * dVdseff_dT + T9 * dgche_dT - Idl * dRds_dT * gche) / T0; else dIdl_dT = 0.0; T9 = diffVds / Va; T0 = 1.0 + T9; here->B4SOIids = Ids = Idl * T0 / here->B4SOInseg; /* LFW_FD add 4 derivatives */ dIds_dVg = (dIdl_dVg * T0 - Idl * (dVdseff_dVg + T9 * dVa_dVg) / Va)/ here->B4SOInseg; dIds_dVb = (dIdl_dVb * T0 - Idl * (dVdseff_dVb + T9 * dVa_dVb) / Va)/ here->B4SOInseg; dIds_dVd = (dIdl_dVd * T0 + Idl * (1.0 - dVdseff_dVd - T9 * dVa_dVd) / Va)/ here->B4SOInseg; dIds_dVe = (dIdl_dVe * T0 - Idl * (dVdseff_dVe + T9 * dVa_dVe) / Va)/ here->B4SOInseg; /* 5 new lines Wagner */ if (selfheat) dIds_dT = dIdl_dT * T0 / here->B4SOInseg + Idl * (-dVdseff_dT/Va -diffVds/Va/Va*dVa_dT) / here->B4SOInseg; else dIds_dT = 0.0; here->B4SOIidovVds = IdlovVdseff * T0 / here->B4SOInseg; /* v4.0 bug fix */ /* IdovVds = IdlovVdseff * T0 / here->B4SOInseg; LFW_FD not needed */ Gm0 = T0 * dIdl_dVg - Idl * (dVdseff_dVg + T9 * dVa_dVg) / Va; Gds0 = T0 * 
dIdl_dVd + Idl * (1.0 - dVdseff_dVd - T9 * dVa_dVd) / Va; Gmb0 = T0 * dIdl_dVb - Idl * (dVdseff_dVb + T9 * dVa_dVb) / Va; Gme0 = dIdl_dVe * T0 - Idl * (dVdseff_dVe + T9 * dVa_dVe) / Va; /* LFW_FD new line */ /*Gmc = 0.0; LFW_FD not used */ if (selfheat) GmT0 = T0 * dIdl_dT - Idl * (dVdseff_dT + T9 * dVa_dT) / Va; else GmT0 = 0.0; /* This includes all dependencies from Vgsteff, Vbseff */ /*Gm = (Gm0 * dVgsteff_dVg+ Gmb0 * dVbseff_dVg) / here->B4SOInseg; v3.0 */ /*Gmb = (Gm0 * dVgsteff_dVb + Gmb0 * dVbseff_dVb) / here->B4SOInseg; */ /*Gds = (Gm0 * dVgsteff_dVd+ Gmb0 * dVbseff_dVd + Gds0) / here->B4SOInseg; v3.0 */ /*Gme = (Gm0 * dVgsteff_dVe + Gmb0 * dVbseff_dVe) / here->B4SOInseg; v3.0 */ /* LFW_FD fix 4 derivatives */ Gm = dIds_dVg; Gmb = dIds_dVb; Gds = dIds_dVd; Gme = dIds_dVe; if (selfheat) /* fix below expression Wagner */ /* GmT = (Gm0 * dVgsteff_dT + Gmb0 * dVbseff_dT + GmT0) / here->B4SOInseg; v3.0 */ GmT = dIds_dT; else GmT = 0.0; /* LFW_FD flexilint inits */ Ibsdif = dIbsdif_dVb = dIbsdif_dT = 0; Ibddif = dIbddif_dVb = dIbddif_dT = 0; Ibs1 = dIbs1_dVb = dIbs1_dT = Ibd1 = dIbd1_dVb = dIbd1_dVd = dIbd1_dT = 0; Ibs2 = dIbs2_dVb = dIbs2_dT = Ibd2 = dIbd2_dVb = dIbd2_dVd = dIbd2_dT = 0; Ibs3 = dIbs3_dVb = dIbs3_dT = Ibd3 = dIbd3_dVb = dIbd3_dVd = dIbd3_dT = 0; Ibs4 = dIbs4_dVb = dIbs4_dT = Ibd4 = dIbd4_dVb = dIbd4_dVd = dIbd4_dT = 0; Igisl = Ggisls = Ggislg = Ggislb = 0.0; dIc_dVd = dIc_dVb = 0.0; /* v3.1 */ if (here->B4SOIsoiMod != 2) /* v3.2 */ { /* calculate GISL/GIDL current */ /*4.1*/ if(model->B4SOImtrlMod == 0) T0 = 3.0 * 3.9 / epsrox * toxe; else T0 = model->B4SOIepsrsub * toxe / epsrox; if (model->B4SOIgidlMod==0) { /*fix next if-then-else block Wagner */ if (model->B4SOImtrlMod==0) { /* T1 = (- Vds - Vgs_eff - egisl) / T0; *//* Bug # 25 Jul09*/ T1 = (- Vds - Vgd_eff - egisl) / T0; dTL1_dT = -dVgd_eff_dT / T0; } else { /* T1 = (- Vds - Vgs_eff - egisl+pParam->B4SOIvfbsd) / T0; */ T1 = (- Vds - Vgd_eff - egisl + pParam->B4SOIvfbsd) / T0; dTL1_dT = 
-dVgd_eff_dT / T0; } /* GISL */ if ((agisl <= 0.0) || (bgisl <= 0.0) || (T1 <= 0.0) || /*(cgisl < 0.0) || (Vbd > 0.0) ) */ /* v4.2 Bug # 24 Jul09*/ (cgisl < 0.0) || (Vbs > 0.0) ) Igisl = Ggisls = Ggislg = Ggislb = Ggislt = 0.0; /* enhanced line Wagner */ else { dT1_dVd = 1 / T0; /* dT1_dVg = - dT1_dVd * dVgs_eff_dVg; *//* Bug fix # 25 Jul09 */ dT1_dVg = - dT1_dVd * dVgd_eff_dVg; T2 = bgisl / T1; if (T2 < EXPL_THRESHOLD) { Igisl = wdios * agisl * T1 * exp(-T2); T3 = Igisl / T1 * (T2 + 1); Ggisls = T3 * dT1_dVd; /* Ggisls = T3 * dT1_dVg; */ /* Bug # 28 Jul09*/ Ggislg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggislt = T3 * dTL1_dT; else Ggislt = 0.0; } else { T3 = wdios * agisl * MIN_EXPL; Igisl = T3 * T1 ; Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggislt = T3 * dTL1_dT; else Ggislt = 0.0; } if(cgisl >= MIN_EXPL) { T4 = Vbs * Vbs; T5 = -Vbs * T4; T6 = cgisl + T5; T7 = T5 / T6; T8 = 3.0 * cgisl * T4 / T6 / T6; Ggisls = Ggisls * T7 + Igisl * T8; Ggislg = Ggislg * T7; Ggislb = -Igisl * T8; /* 3 new lines Wagner */ if (selfheat) Ggislt = Ggislt * T7; else Ggislt = 0.0; Igisl *= T7; } else Ggislb = 0.0; } here->B4SOIigisl = Igisl; /* End of GISL */ /* enhance next if-then-else block Wagner */ if (model->B4SOImtrlMod==0) { T1 = (Vds - Vgs_eff - egidl) / T0; dTL1_dT = -dVgs_eff_dT / T0; } else { T1 = (Vds - Vgs_eff - egidl+pParam->B4SOIvfbsd) / T0; dTL1_dT = -dVgs_eff_dT / T0; } /* GIDL */ if ((agidl <= 0.0) || (bgidl <= 0.0) || (T1 <= 0.0) || (cgidl < 0.0) || (Vbd > 0.0) ) Igidl = Ggidld = Ggidlg = Ggidlb = Ggidlt = 0.0; /* enhanced line Wagner */ else { dT1_dVd = 1 / T0; dT1_dVg = - dT1_dVd * dVgs_eff_dVg; T2 = bgidl / T1; if (T2 < EXPL_THRESHOLD) { Igidl = wdiod * agidl * T1 * exp(-T2); T3 = Igidl / T1 * (T2 + 1); Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggidlt = T3 * dTL1_dT; else Ggidlt = 0.0; } else { T3 = wdiod * agidl * MIN_EXPL; Igidl = T3 * T1 ; Ggidld = T3 * 
dT1_dVd; Ggidlg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggidlt = T3 * dTL1_dT; else Ggidlt = 0.0; } if(cgidl >= MIN_EXPL) { T4 = Vbd * Vbd; T5 = -Vbd * T4; T6 = cgidl + T5; T7 = T5 / T6; T8 = 3.0 * cgidl * T4 / T6 / T6; Ggidld = Ggidld * T7 + Igidl * T8; Ggidlg = Ggidlg * T7; Ggidlb = -Igidl * T8; /* 3 new lines Wagner */ if (selfheat) Ggidlt = Ggidlt * T7; else Ggidlt = 0.0; Igidl *= T7; } else Ggidlb = 0.0; } here->B4SOIigidl = Igidl; /* End of GIDL*/ } else { /* enhance next if-then-else block Wagner */ if (model->B4SOImtrlMod==0) { /* T1 = (-Vds - rgisl*Vgs_eff - pParam->B4SOIegisl) / T0;*/ T1 = (-Vds - rgisl*Vgd_eff - egisl) / T0; /* Bug # 26 Jul09*/ dTL1_dT = -rgisl * dVgd_eff_dT / T0; } else { /* T1 = (-Vds - rgisl*Vgs_eff - pParam->B4SOIegisl+pParam->B4SOIvfbsd) / T0; */ T1 = (-Vds - rgisl*Vgd_eff - egisl + pParam->B4SOIvfbsd) / T0; /* Bug # 26 Jul09*/ dTL1_dT = -rgisl * dVgd_eff_dT / T0; } /* GISL */ if ((agisl <= 0.0) || (bgisl <= 0.0) || (T1 <= 0.0) || (cgisl < 0.0) ) Igisl = Ggisls = Ggislg = Ggislb = Ggislt = 0.0; /* enhanced line Wagner */ else { dT1_dVd = 1 / T0; /* dT1_dVg = - rgisl*dT1_dVd * dVgs_eff_dVg;*//*Bug fix #26*/ dT1_dVg = - rgisl*dT1_dVd * dVgd_eff_dVg; T2 = bgisl / T1; if (T2 < EXPL_THRESHOLD) { Igisl = wdios * agisl * T1 * exp(-T2); T3 = Igisl / T1 * (T2 + 1); Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggislt = T3 * dTL1_dT; else Ggislt = 0.0; } else { T3 = wdios * agisl * MIN_EXPL; Igisl = T3 * T1 ; Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggislt = T3 * dTL1_dT; else Ggislt = 0.0; } T4 = Vbs - fgisl; /*if (T4==0) T5 =1; else T5 = kgisl/T4; T6 = exp(T5); if (T6<EXPL_THRESHOLD) {Ggisls*=exp(T5); Ggislg*=exp(T5); Ggislb = -Igisl*exp(T5)*T5/T4; Igisl*=exp(T5); } else Ggislb=0.0; v4.3 bug fix */ if (T4==0) T5 = EXPL_THRESHOLD; else T5 = kgisl/T4; if (T5<EXPL_THRESHOLD) {T6 = exp(T5); Ggislb = -Igisl*T6*T5/T4; } else {T6 = MAX_EXPL; 
Ggislb=0.0; } Ggisls*=T6; Ggislg*=T6; /* 3 new lines Wagner */ if (selfheat) Ggislt *= T6; else Ggislt = 0.0; Igisl*=T6; } here->B4SOIigisl = Igisl; /* End of GISL */ /* enhance next if-then-else block Wagner */ if (model->B4SOImtrlMod==0) { /*T1 = (Vds - rgidl*Vgs_eff - pParam->B4SOIegidl) / T0; *//* v4.2 bugfix #26 */ T1 = (Vds - rgidl*Vgs_eff - egidl) / T0; dTL1_dT = -rgidl * dVgs_eff_dT / T0; } else { /*T1 = (Vds - rgidl*Vgs_eff - pParam->B4SOIegidl+pParam->B4SOIvfbsd) / T0;*/ /* v4.2 bugfix #26 */ T1 = (Vds - rgidl * Vgs_eff - egidl + pParam->B4SOIvfbsd) / T0; dTL1_dT = -rgidl * dVgs_eff_dT / T0; } /* GIDL */ if ((agidl <= 0.0) || (bgidl <= 0.0) || (T1 <= 0.0) || (cgidl < 0.0) ) Igidl = Ggidld = Ggidlg = Ggidlb = Ggidlt = 0.0; /* enhanced line Wagner */ else { dT1_dVd = 1 / T0; dT1_dVg = - rgidl*dT1_dVd * dVgs_eff_dVg; T2 = bgidl / T1; if (T2 < EXPL_THRESHOLD) { Igidl = wdiod * agidl * T1 * exp(-T2); T3 = Igidl / T1 * (T2 + 1); Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggidlt = T3 * dTL1_dT; else Ggidlt = 0.0; } else { T3 = wdiod * agidl * MIN_EXPL; Igidl = T3 * T1 ; Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; /* 3 new lines Wagner */ if (selfheat) Ggidlt = T3 * dTL1_dT; else Ggidlt = 0.0; } T4 = Vbd - fgidl; /*if (T4==0) T5 =1; else T5 = kgidl/T4; T6 = exp(T5); if (T6<EXPL_THRESHOLD) {Ggidld*=exp(T5); Ggidlg*=exp(T5); Ggidlb = -Igidl*exp(T5)*T5/T4; Igidl*=exp(T5); } else Ggidlb=0.0; v4.3 bug fix */ if (T4==0) T5 = EXPL_THRESHOLD; else T5 = kgidl/T4; if (T5<EXPL_THRESHOLD) {T6 = exp(T5); Ggidlb = -Igidl*T6*T5/T4; } else {T6 = MAX_EXPL; Ggidlb=0.0; } Ggidld*=T6; Ggidlg*=T6; /* 3 new lines Wagner */ if (selfheat) Ggidlt *= T6; else Ggidlt = 0.0; Igidl*=T6; } here->B4SOIigidl = Igidl; /* End of GIDL */ } /* calculate diode and BJT current */ WsTsi = wdios * model->B4SOItsi; WdTsi = wdiod * model->B4SOItsi; /* NVtm1 = Vtm * pParam->B4SOIndiode; v4.2 bugfix */ NVtm1 = Vtm * ndiode; if (selfheat) /*dNVtm1_dT = 
pParam->B4SOIndiode * dVtm_dT; v4.2 bugfix */ dNVtm1_dT = ndiode * dVtm_dT; else dNVtm1_dT = 0; T0 = vbs_jct / NVtm1; /* v4.0 */ dT0_dVb = 1.0 / NVtm1; if (selfheat) dT0_dT = -vbs_jct / NVtm1 / NVtm1 * dNVtm1_dT; else dT0_dT = 0; DEXP(T0, ExpVbsNVtm, T1); dExpVbsNVtm_dVb = T1 * dT0_dVb; if (selfheat) dExpVbsNVtm_dT = T1 * dT0_dT; else dExpVbsNVtm_dT = 0; /* NVtm1 = Vtm * pParam->B4SOIndioded; v4.2 bugfix */ NVtm1 = Vtm * ndioded; /* v4.0 drain side */ if (selfheat) /*dNVtm1_dT = pParam->B4SOIndioded* dVtm_dT; v4.2 bugfix */ dNVtm1_dT = ndioded * dVtm_dT; else dNVtm1_dT = 0; T0 = vbd_jct / NVtm1; /* v4.0 */ dT0_dVb = 1.0 / NVtm1; dT0_dVd = -dT0_dVb; if (selfheat) dT0_dT = -vbd_jct / NVtm1 / NVtm1 * dNVtm1_dT; else dT0_dT = 0; DEXP(T0, ExpVbdNVtm, T1); dExpVbdNVtm_dVb = T1 * dT0_dVb; dExpVbdNVtm_dVd = -dExpVbdNVtm_dVb; if (selfheat) dExpVbdNVtm_dT = T1 * dT0_dT; else dExpVbdNVtm_dT = 0; /* Ibs1: diffusion current */ if (jdifs == 0) { Ibs1 = dIbs1_dVb = dIbs1_dT = 0; } else { T0 = WsTsi * jdifs; if (selfheat) dT0_dT = WsTsi * djdifs_dT; else dT0_dT = 0; Ibs1 = T0 * (ExpVbsNVtm - 1); dIbs1_dVb = T0 * dExpVbsNVtm_dVb; if (selfheat) dIbs1_dT = T0 * dExpVbsNVtm_dT + (ExpVbsNVtm - 1) * dT0_dT; else dIbs1_dT = 0; } /* Ibd1: diffusion current */ if (jdifd == 0) { Ibd1 = dIbd1_dVb = dIbd1_dVd = dIbd1_dT = 0; } else { T0 = WdTsi * jdifd; if (selfheat) dT0_dT = WdTsi * djdifd_dT; else dT0_dT = 0; Ibd1 = T0 * (ExpVbdNVtm - 1); dIbd1_dVb = T0 * dExpVbdNVtm_dVb; dIbd1_dVd = -dIbd1_dVb; if (selfheat) dIbd1_dT = T0 * dExpVbdNVtm_dT + (ExpVbdNVtm -1) * dT0_dT; else dIbd1_dT = 0; } /* Ibs2:recombination/trap-assisted tunneling current */ if (jrecs == 0) { Ibs2 = dIbs2_dVb = dIbs2_dT = 0; } else { /* forward bias */ /* NVtmf = 0.026 * nrecf0s bugfix_snps for DC swapping * (1 + pParam->B4SOIntrecf * (TempRatio - 1)); NVtmr = 0.026 * nrecr0s bugfix_snps for DC swapping * (1 + pParam->B4SOIntrecr * (TempRatio - 1)); */ NVtmf = Vtm00 * nrecf0s /* bugfix_snps for DC swapping*/ * (1 + 
pParam->B4SOIntrecf * (TempRatio - 1)); /* v4.3.1 -Tanvir */ NVtmr = Vtm00 * nrecr0s /* bugfix_snps for DC swapping*/ * (1 + pParam->B4SOIntrecr * (TempRatio - 1)); /* v4.3.1 -Tanvir */ if (selfheat) { /* dNVtmf_dT = nrecf0s * 0.026 bugfix_snps for DC swapping * pParam->B4SOIntrecf * dTempRatio_dT; dNVtmr_dT = nrecr0s * 0.026 bugfix_snps for DC swapping * pParam->B4SOIntrecr * dTempRatio_dT; */ dNVtmf_dT = nrecf0s * Vtm00 /* bugfix_snps for DC swapping*/ * pParam->B4SOIntrecf * dTempRatio_dT; /* v4.3.1 -Tanvir */ dNVtmr_dT = nrecr0s * Vtm00 /* bugfix_snps for DC swapping*/ * pParam->B4SOIntrecr * dTempRatio_dT; /* v4.3.1 -Tanvir */ } else dNVtmf_dT = dNVtmr_dT = 0; T0 = vbs_jct / NVtmf; /* v4.0 */ DEXP(T0,T10,T2); T4 = 1 / NVtmf; dT10_dVb = T4 * T2; if (selfheat) dT10_dT = - T4 * T2 * vbs_jct / NVtmf * dNVtmf_dT ; else dT10_dT = 0.0; /* reverse bias */ if ((vrec0s- vbs_jct) < 1e-3) { /* bugfix_snps for DC swapping*/ /* v2.2.3 bug fix */ T1 = 1e3; T0 = -vbs_jct / NVtmr * vrec0s * T1; /* bugfix_snps for DC swapping*/ T11 = -exp(T0); dT11_dVb = dT11_dT = 0; } else { T1 = 1 / (vrec0s - vbs_jct); /* bugfix_snps for DC swapping*/ T0 = -vbs_jct / NVtmr * vrec0s * T1; /* bugfix_snps for DC swapping*/ dT0_dVb = -vrec0s / NVtmr * /* bugfix_snps for DC swapping*/ (T1 + vbs_jct * T1 * T1) ; if (selfheat) dT0_dT = -T0 / NVtmr * dNVtmr_dT; else dT0_dT = 0; DEXP(T0, T11, T2); T11 = -T11; dT11_dVb = -T2 * dT0_dVb; if (selfheat) dT11_dT = -T2 * dT0_dT; else dT11_dT = 0; } T3 = WsTsi * jrecs; Ibs2 = T3 * (T10 + T11); dIbs2_dVb = T3 * (dT10_dVb + dT11_dVb); if (selfheat) dIbs2_dT = T3 * (dT10_dT + dT11_dT) + WsTsi * (T10 + T11) * djrecs_dT; else dIbs2_dT = 0; } if (jrecd == 0) { Ibd2 = dIbd2_dVb = dIbd2_dVd = dIbd2_dT = 0; } else { /*NVtmf = 0.026 * nrecf0d bugfix_snps for DC swapping * (1 + pParam->B4SOIntrecf * (TempRatio - 1)); NVtmr = 0.026 * nrecr0d bugfix_snps for DC swapping * (1 + pParam->B4SOIntrecr * (TempRatio - 1)); */ NVtmf = Vtm00 * nrecf0d /* bugfix_snps for DC 
swapping*/ * (1 + pParam->B4SOIntrecf * (TempRatio - 1)); /* v4.3.1 -Tanvir */ NVtmr = Vtm00 * nrecr0d /* bugfix_snps for DC swapping*/ * (1 + pParam->B4SOIntrecr * (TempRatio - 1)); /* v4.3.1 -Tanvir */ if (selfheat) { /* dNVtmf_dT = nrecf0d * 0.026 bugfix_snps for DC swapping * pParam->B4SOIntrecf * dTempRatio_dT; dNVtmr_dT = nrecr0d * 0.026 * pParam->B4SOIntrecr * dTempRatio_dT; bugfix_snps for DC swapping */ dNVtmf_dT = nrecf0d * Vtm00 /*bugfix_snps for DC swapping*/ * pParam->B4SOIntrecf * dTempRatio_dT; /* v4.3.1 -Tanvir */ dNVtmr_dT = nrecr0d * Vtm00 /* v4.3.1 -Tanvir */ * pParam->B4SOIntrecr * dTempRatio_dT; /* bugfix_snps for DC swapping*/ } else dNVtmf_dT = dNVtmr_dT = 0; T0 = vbd_jct / NVtmf; DEXP(T0,T10,T2); T4 = 1 / NVtmf; dT10_dVb = T4 * T2; if (selfheat) dT10_dT = - T4 * T2 * vbd_jct / NVtmf * dNVtmf_dT ; else dT10_dT = 0.0; if ((vrec0d - vbd_jct) < 1e-3) { /* bugfix_snps for DC swapping*/ /* v2.2.3 bug fix */ T1 = 1e3; T0 = -vbd_jct / NVtmr * vrec0d * T1; /* bugfix_snps for DC swapping*/ T11 = -exp(T0); dT11_dVb = dT11_dT = 0; } else { T1 = 1 / (vrec0d - vbd_jct); /* bugfix_snps for DC swapping*/ T0 = -vbd_jct / NVtmr * vrec0d * T1; /* bugfix_snps for DC swapping*/ dT0_dVb = -vrec0d / NVtmr /* bugfix_snps for DC swapping*/ * (T1 + vbd_jct * T1 * T1) ; if (selfheat) dT0_dT = -T0 / NVtmr * dNVtmr_dT; else dT0_dT = 0; DEXP(T0, T11, T2); T11 = - T11; dT11_dVb = -T2 * dT0_dVb; if (selfheat) dT11_dT = -T2 * dT0_dT; else dT11_dT = 0; } T3 = WdTsi * jrecd; Ibd2 = T3 * (T10 + T11); dIbd2_dVb = T3 * (dT10_dVb + dT11_dVb); dIbd2_dVd = -dIbd2_dVb; if (selfheat) dIbd2_dT = T3 * (dT10_dT + dT11_dT) + WdTsi * (T10 + T11) * djrecd_dT; else dIbd2_dT = 0; } /* Ibs3/Ibd3: recombination current in neutral body */ WTsi = pParam->B4SOIweff / here->B4SOInseg * model->B4SOItsi; if (jbjts == 0.0 && jbjtd == 0.0) { Ibs3 = dIbs3_dVb = dIbs3_dVd = dIbs3_dT = 0.0; Ibd3 = dIbd3_dVb = dIbd3_dVd = dIbd3_dT = 0.0; Ibsdif = dIbsdif_dVb = dIbsdif_dT = 0; /*Ibddif = dIbddif_dVb = 
dIbddif_dT = 0; v4.2 */ Ibddif = dIbddif_dVb = dIbddif_dT = 0; here->B4SOIic = Ic = Gcd = Gcb = GcT = 0.0; } else { Ien = WTsi * jbjts * pParam->B4SOIlratio; if (selfheat) dIen_dT = WTsi * djbjts_dT * pParam->B4SOIlratio; else dIen_dT = 0; /* high level injection of source side */ if ((Ehlis = Ahlis * (ExpVbsNVtm - 1)) < 1e-5) { Ehlis = dEhlis_dVb = dEhlis_dT = 0; EhlisFactor = 1; dEhlisFactor_dVb = dEhlisFactor_dT = 0; } else { dEhlis_dVb = Ahlis * dExpVbsNVtm_dVb; if (selfheat) dEhlis_dT = Ahlis * dExpVbsNVtm_dT + (ExpVbsNVtm - 1) * dAhlis_dT; else dEhlis_dT = 0; EhlisFactor = 1.0 / sqrt(1 + Ehlis); T0 = -0.5 * EhlisFactor / (1 + Ehlis); dEhlisFactor_dVb = T0 * dEhlis_dVb; if (selfheat) dEhlisFactor_dT = T0 * dEhlis_dT; else dEhlisFactor_dT = 0; } /* high level injection of drain side */ if ((Ehlid = Ahlid * (ExpVbdNVtm - 1)) < 1e-5) { Ehlid = dEhlid_dVb = dEhlid_dVd = dEhlid_dT = 0; EhlidFactor = 1; dEhlidFactor_dVb = dEhlidFactor_dT = 0; /* LFW_FD flexilint */ } else { dEhlid_dVb = Ahlid * dExpVbdNVtm_dVb; dEhlid_dVd = -dEhlid_dVb; if (selfheat) dEhlid_dT = Ahlid * dExpVbdNVtm_dT + (ExpVbdNVtm - 1) * dAhlid_dT; else dEhlid_dT = 0; EhlidFactor = 1.0 / sqrt(1 + Ehlid); T0 = -0.5 * EhlidFactor / (1 + Ehlid); dEhlidFactor_dVb = T0 * dEhlid_dVb; if (selfheat) dEhlidFactor_dT = T0 * dEhlid_dT; else dEhlidFactor_dT = 0; } /* v3.1.1 bug fix for Ibjt(L) discontinuity */ T0 = 1 - pParam->B4SOIarfabjt; T1 = T0 * Ien; if (selfheat) dT1_dT = T0 * dIen_dT; else dT1_dT = 0; Ibs3 = T1 * (ExpVbsNVtm - 1) * EhlisFactor; dIbs3_dVb = T1 * (dExpVbsNVtm_dVb * EhlisFactor + (ExpVbsNVtm - 1) * dEhlisFactor_dVb); dIbs3_dVd = 0; if (selfheat) dIbs3_dT = dT1_dT * (ExpVbsNVtm - 1) * EhlisFactor + T1 * (dExpVbsNVtm_dT * EhlisFactor + (ExpVbsNVtm - 1) * dEhlisFactor_dT); else dIbs3_dT = 0.0; Ien = WTsi * jbjtd * pParam->B4SOIlratio; if (selfheat) dIen_dT = WTsi * djbjtd_dT * pParam->B4SOIlratio; else dIen_dT = 0; T1 = T0 * Ien; if (selfheat) dT1_dT = T0 * dIen_dT; else dT1_dT = 0; Ibd3 = T1 
* (ExpVbdNVtm - 1) * EhlidFactor; dIbd3_dVb = T1 * (dExpVbdNVtm_dVb * EhlidFactor + (ExpVbdNVtm - 1) * dEhlidFactor_dVb); dIbd3_dVd = -dIbd3_dVb; if (selfheat) dIbd3_dT = dT1_dT * (ExpVbdNVtm - 1) * EhlidFactor + T1 * (dExpVbdNVtm_dT * EhlidFactor + (ExpVbdNVtm - 1) * dEhlidFactor_dT); else dIbd3_dT = 0.0; /* effective diffusion current for capacitance calcu. */ Iendif = WTsi * jbjts * pParam->B4SOIlratiodif; if (selfheat) dIendif_dT = WTsi * djbjts_dT * pParam->B4SOIlratiodif; else dIendif_dT = 0; Ibsdif = Iendif * (ExpVbsNVtm - 1) * EhlisFactor; dIbsdif_dVb = Iendif * (dExpVbsNVtm_dVb * EhlisFactor + (ExpVbsNVtm - 1) * dEhlisFactor_dVb); if (selfheat) dIbsdif_dT = dIendif_dT * (ExpVbsNVtm - 1) * EhlisFactor + Iendif * (dExpVbsNVtm_dT * EhlisFactor + (ExpVbsNVtm - 1) * dEhlisFactor_dT); else dIbsdif_dT = 0; Iendif = WTsi * jbjtd * pParam->B4SOIlratiodif; if (selfheat) dIendif_dT = WTsi * djbjtd_dT * pParam->B4SOIlratiodif; else dIendif_dT = 0; Ibddif = Iendif * (ExpVbdNVtm - 1) * EhlidFactor; dIbddif_dVb = Iendif * (dExpVbdNVtm_dVb * EhlidFactor + (ExpVbdNVtm - 1) * dEhlidFactor_dVb); /*dIbddif_dVd = -dIbddif_dVb; v4.2 */ if (selfheat) dIbddif_dT = dIendif_dT * (ExpVbdNVtm - 1) * EhlidFactor + Iendif * (dExpVbdNVtm_dT * EhlidFactor + (ExpVbdNVtm - 1) * dEhlidFactor_dT); else dIbddif_dT = 0; /* Ic: Bjt collector current */ if ((here->B4SOIbjtoff == 1) || (Vds == 0.0)) { here->B4SOIic = Ic = Gcd = Gcb = GcT = 0.0; dIc_dVb = dIc_dVd = 0.0; /*bugfix_snps for setting zero */ } else { /* second order effects */ /* T0 = 1 + (Vbs + Vbd) / pParam->B4SOIvearly; v4.3 bugfix */ T0 = 1 + (vbs_jct + vbd_jct) / pParam->B4SOIvearly; dT0_dVb = 2.0 / pParam->B4SOIvearly; dT0_dVd = -1.0 / pParam->B4SOIvearly; T1 = Ehlis + Ehlid; dT1_dVb = dEhlis_dVb + dEhlid_dVb; dT1_dVd = dEhlid_dVd; if (selfheat) dT1_dT = dEhlis_dT + dEhlid_dT; else dT1_dT = 0; T3 = sqrt(T0 * T0 + 4 * T1); dT3_dVb = 0.5 / T3 * (2 * T0 * dT0_dVb + 4 * dT1_dVb); dT3_dVd = 0.5 / T3 * (2 * T0 * dT0_dVd + 4 * dT1_dVd); 
if (selfheat) dT3_dT = 2 * dT1_dT / T3; else dT3_dT = 0; T2 = (T0 + T3) / 2.0; dT2_dVb = (dT0_dVb + dT3_dVb) / 2.0; dT2_dVd = (dT0_dVd + dT3_dVd) / 2.0; if (selfheat) dT2_dT = dT3_dT /2.0; else dT2_dT = 0; if (T2 < .1) { E2ndFactor = 10.0; dE2ndFactor_dVb = dE2ndFactor_dVd = dE2ndFactor_dT = 0; } else { E2ndFactor = 1.0 / T2; dE2ndFactor_dVb = -E2ndFactor / T2 * dT2_dVb; dE2ndFactor_dVd = -E2ndFactor / T2 * dT2_dVd; if (selfheat) dE2ndFactor_dT = -E2ndFactor / T2 * dT2_dT; else dE2ndFactor_dT = 0; } T0 = pParam->B4SOIarfabjt * Ien; /* here Ien refers to the drain side to simplify the code */ if (selfheat) dT0_dT = pParam->B4SOIarfabjt * dIen_dT; else dT0_dT = 0; here->B4SOIic = Ic = T0 * (ExpVbsNVtm - ExpVbdNVtm) * E2ndFactor; Gcb = dIc_dVb = T0 * ((dExpVbsNVtm_dVb - dExpVbdNVtm_dVb) * E2ndFactor + (ExpVbsNVtm - ExpVbdNVtm) * dE2ndFactor_dVb); Gcd = dIc_dVd = T0 * (-dExpVbdNVtm_dVd * E2ndFactor + (ExpVbsNVtm - ExpVbdNVtm) * dE2ndFactor_dVd); if (selfheat) GcT = T0 * (dExpVbsNVtm_dT - dExpVbdNVtm_dT) * E2ndFactor + dT0_dT * (ExpVbsNVtm - ExpVbdNVtm) * E2ndFactor + T0 * (ExpVbsNVtm - ExpVbdNVtm) * dE2ndFactor_dT; else GcT = 0; } } /* Ibs4/Ibd4 : tunneling */ if (jtuns == 0 && jtund == 0) { Ibs4 = Ibd4 = dIbs4_dVb = dIbs4_dT = dIbd4_dVb = dIbd4_dVd = dIbd4_dT = 0; } else { /* NVtm2 = 0.026 * ntuns; */ /* bugfix_snps for junction DC swapping*/ NVtm2 = Vtm00 * ntuns; /* bugfix_snps for junction DC swapping*/ /* v4.3.1 -Tanvir */ if ((vtun0s - vbs_jct) < 1e-3) /* bugfix_snps for junction DC swapping*/ { /* v2.2.3 bug fix */ T1=1e3; T0 = -vbs_jct / NVtm2 * vtun0s * T1; /* bugfix_snps for junction DC swapping*/ T1 = exp(T0); T3 = WsTsi * jtuns; Ibs4 = T3 * (1- T1); /*dIbs4_dVb = dIbs4_dT = 0; */ dIbs4_dVb = 0.0; if (selfheat) dIbs4_dT = (1 - T1) * WsTsi * djtuns_dT; else dIbs4_dT = 0; } else { T1 = 1 / (vtun0s - vbs_jct); /*bugfix for junction DC swapping*/ T0 = -vbs_jct / NVtm2 * vtun0s * T1; /*bugfix for junction DC swapping*/ dT0_dVb = -vtun0s / NVtm2 * (T1 + vbs_jct * 
T1 * T1) ; /*bugfix for junction DC swapping*/ DEXP(T0, T1, T2); T3 = WsTsi * jtuns; Ibs4 = T3 * (1- T1); dIbs4_dVb = -T3 * T2 * dT0_dVb; if (selfheat) dIbs4_dT = (1 - T1) * WsTsi * djtuns_dT; else dIbs4_dT = 0; } /*NVtm2 = 0.026 * ntund;*/ /* bugfix_snps for junction DC swapping*/ NVtm2 = Vtm00 * ntund; /* v4.3.1 -Tanvir */ if ((vtun0d - vbd_jct) < 1e-3) { /* bugfix_snps for junction DC swapping*/ /* v2.2.3 bug fix */ T1=1e3; T0 = -vbd_jct / NVtm2 * vtun0d * T1; /* bugfix_snps for junction DC swapping*/ T1 = exp(T0); T3 = WdTsi * jtund; Ibd4 = T3 * (1- T1); /*dIbd4_dVb = dIbd4_dT = 0;*/ dIbd4_dVb = 0; dIbd4_dVd = 0; if (selfheat) /* dIbs4_dT = (1 - T1) * WsTsi * djtuns_dT; Bug fix #8 Jun 09 'typo's corrected for Drain side */ /* else dIbs4_dT = 0; */ dIbd4_dT = (1 - T1) * WdTsi * djtund_dT; /* Fix */ else dIbd4_dT = 0; } else { T1 = 1 / (vtun0d - vbd_jct); /* bugfix_snps for junction DC swapping*/ T0 = -vbd_jct / NVtm2 * vtun0d * T1; /* bugfix_snps for junction DC swapping*/ dT0_dVb = -vtun0d / NVtm2 * (T1 + vbd_jct * T1 * T1) ; /* bugfix_snps for junction DC swapping*/ DEXP(T0, T1, T2); T3 = WdTsi * jtund; Ibd4 = T3 * (1- T1); dIbd4_dVb = -T3 * T2 * dT0_dVb; dIbd4_dVd = -dIbd4_dVb; if (selfheat) dIbd4_dT = (1 - T1) * WdTsi * djtund_dT; else dIbd4_dT = 0; } } here->B4SOIitun = - Ibd3 - Ibd4; Ibs = Ibs1 + Ibs2 + Ibs3 + Ibs4; Ibd = Ibd1 + Ibd2 + Ibd3 + Ibd4; Gjsb = dIbs1_dVb + dIbs2_dVb + dIbs3_dVb + dIbs4_dVb; Gjsd = dIbs3_dVd; if (selfheat) GjsT = dIbs1_dT + dIbs2_dT + dIbs3_dT + dIbs4_dT; else GjsT = 0.0; Gjdb = dIbd1_dVb + dIbd2_dVb + dIbd3_dVb + dIbd4_dVb; Gjdd = dIbd1_dVd + dIbd2_dVd + dIbd3_dVd + dIbd4_dVd; if (selfheat) GjdT = dIbd1_dT + dIbd2_dT + dIbd3_dT + dIbd4_dT; else GjdT = 0.0; } else /* v3.1 soiMod=2: ideal FD */ { here->B4SOIigidl= Igidl = Ggidld = Ggidlg = Ggidlb = Ggidlt = 0.0; /* LFW_FD inits */ here->B4SOIigisl= Igisl /* Bug fix #9 Jun 09 Code added to set Igisl components to zero */ = Ggisls = Ggislg = Ggislb = Ggislt = 0.0; /* This is an appx 
solution */ here->B4SOIitun = 0; Ibs = 0; Ibd = 0; Gjsb = 0.0; Gjdb = 0.0; Gjsd = 0.0; Gjdd = 0.0; /* here->B4SOIigidl= Igidl */ /* = Ggidld = Ggidlg = Ggidlb = Ggidlt = 0.0; LFW_FD enhance line */ /* here->B4SOIigisl= Igisl Bug fix #9 Jun 09 Code added to set Igisl components to zero */ /* = Ggisls = Ggislg = Ggislb = Ggislt = 0.0; This is an appx solution - LFW_FD enhance line */ /* Final code will comply with BSIM MG in future releases */ /* here->B4SOIitun = 0; */ /* LFW_FD next 21 lines; fix Ibs, Ibd, and derivatives Gjs* and Gjd* */ /* Ibs = 0; */ /* Ibd = 0; */ /* Add Gmin since body node is floating - LFW - DIDN'T Converge */ /* Connect to electrical source, since source is BSIM reference */ /* Also option to connect to both source and drain */ /* if (here->B4SOImode == 1) */ /* { */ /* Ibs = 1.0e-18 * vbs; */ /* Ibd = 1.0e-18 * vbd; */ /* } */ /* else */ /* { */ /* Ibs = 1.0e-18 * vbd; */ /* Ibd = 1.0e-18 * vbs; */ /* } */ /* Gjsb = 1.0e-18; */ /* Gjdb = 1.0e-18; */ /* Gjsd = 0.0; */ /* Gjdd = -1.0e-18; */ GjsT = 0; GjdT = 0; here->B4SOIic = Ic = Gcd = Gcb = GcT = 0.0; } if (here->B4SOImode > 0) { here->B4SOIibs = Ibs; here->B4SOIibd = Ibd; } else { here->B4SOIibd = Ibs; here->B4SOIibs = Ibd; } /* LFW_FD 12 new lines per flexilint */ Vfb = 0.0; Voxacc = dVoxacc_dVg = dVoxacc_dVd = dVoxacc_dVb = dVoxacc_dVe = 0.0; Voxdepinv = dVoxdepinv_dVg = dVoxdepinv_dVd = dVoxdepinv_dVb = dVoxdepinv_dT= dVoxdepinv_dVe = 0.0; Vgb = Vgs_eff - Vbs; /* flexilint - moved from below if stmt */ dVgb_dVg = dVgs_eff_dVg - dVbs_dVg; dVgb_dVd = - dVbs_dVd; dVgb_dVe = - dVbs_dVe; dVgb_dVb = - dVbs_dVb; dVoxacc_dT = 0.0; dVfb_dT = 0.0; /* v3.0: gate-tunneling */ if ((model->B4SOIigbMod != 0) || (model->B4SOIigcMod != 0)) { /* Calculate Vox first */ Vfb = model->B4SOItype * here->B4SOIvth0 /* v4.0 */ - phi - pParam->B4SOIk1eff * sqrtPhi; dVfb_dT = - dphi_dT - pParam->B4SOIk1eff*dsqrtPhi_dT; /* new line Wagner */ T3 = Vfb - Vgs_eff + Vbs - DELTA_3; /* LFW_FD add/fix 5 derivatives */ 
dT3_dVg = -dVgs_eff_dVg + dVbs_dVg; dT3_dVd = dVbs_dVd; dT3_dVe = dVbs_dVe; dT3_dVb = dVbs_dVb; dTL3_dT = dVfb_dT - dVgs_eff_dT + dVbs_dT; if (Vfb <= 0.0) { T0 = sqrt(T3 * T3 - 4.0 * DELTA_3 * Vfb); dT0_dVg = 1.0/(2.0 * T0) * 2.0*T3 * dT3_dVg; dT0_dVb = 0.5*(1.0/T0) * 2.0*T3 * dT3_dVb; /* LFW_FD add 2 derivatives */ dT0_dVd = T3 * dT3_dVd / T0; dT0_dVe = T3 * dT3_dVe / T0; dTL0_dT = (T3 * dTL3_dT - 2.0 * DELTA_3 * dVfb_dT) / T0; /* new line Wagner */ TL1 = -1.0; /* new line Wagner */ } else { T0 = sqrt(T3 * T3 + 4.0 * DELTA_3 * Vfb); dT0_dVg = 1.0/(2.0 * T0) * 2.0*T3 * dT3_dVg; dT0_dVb = 0.5*(1.0/T0) * 2.0*T3 * dT3_dVb; /* LFW_FD add 2 derivatives */ dT0_dVd = T3 * dT3_dVd / T0; dT0_dVe = T3 * dT3_dVe / T0; dTL0_dT = (T3 * dTL3_dT + 2.0 * DELTA_3 * dVfb_dT) / T0; /* new line Wagner */ TL1 = 1.0; /* new line Wagner */ } Vfbeff = Vfb - 0.5 * (T3 + T0); dVfbeff_dVg = -0.5 * (dT3_dVg + dT0_dVg); dVfbeff_dVb = -0.5 * (dT3_dVb + dT0_dVb); /* LFW_FD add 2 derivatives */ dVfbeff_dVd = -0.5 * (dT3_dVd + dT0_dVd); dVfbeff_dVe = -0.5 * (dT3_dVe + dT0_dVe); /* 2 new lines - Wagner */ if (selfheat) dVfbeff_dT = dVfb_dT - 0.5 * (dTL3_dT + dTL0_dT); else dVfbeff_dT = 0.0; Voxacc = Vfb - Vfbeff; dVoxacc_dVg = -dVfbeff_dVg; /* LFW_FD add/fix 2 derivatives */ dVoxacc_dVd = -dVfbeff_dVd; dVoxacc_dVe = -dVfbeff_dVe; dVoxacc_dVb = -dVfbeff_dVb; if (Voxacc < 0.0) Voxacc = dVoxacc_dVg = dVoxacc_dVb = dVoxacc_dVd = dVoxacc_dVe = 0.0; /* LFW_FD enhance line */ /* 2 new lines Wagner */ if (selfheat) dVoxacc_dT = dVfb_dT - dVfbeff_dT; else dVoxacc_dT = 0.0; T0 = Vgs_eff - Vgsteff - Vfbeff - Vbseff; /* LFW_FD add/fix 4 derivatives */ dT0_dVg = dVgs_eff_dVg - dVgsteff_dVg - dVfbeff_dVg - dVbseff_dVg; /* v3.0 */ dT0_dVd = -dVgsteff_dVd - dVbseff_dVd - dVfbeff_dVd; dT0_dVb = -dVgsteff_dVb - dVbseff_dVb - dVfbeff_dVb; dT0_dVe = -dVgsteff_dVe - dVbseff_dVe - dVfbeff_dVe; dVoxdepinv_dT = 0.0; /* flexilint */ if (selfheat) /* fix below expression Wagner */ /*dT0_dT = -dVgsteff_dT - dVbseff_dT; v3.0 
*/ dT0_dT = dVgs_eff_dT - dVgsteff_dT - dVfbeff_dT - dVbseff_dT; /* v3.0 */ if (pParam->B4SOIk1ox == 0.0) /* v4.0 */ { Voxdepinv = dVoxdepinv_dVg = dVoxdepinv_dVd = dVoxdepinv_dVb = dVoxdepinv_dT = 0.0; } else { if (T0 < 0.0) { T1 = T0/pParam->B4SOIk1ox; dT1_dVg = dT0_dVg/pParam->B4SOIk1ox; dT1_dVd = dT0_dVd/pParam->B4SOIk1ox; dT1_dVb = dT0_dVb/pParam->B4SOIk1ox; dT1_dVe = dT0_dVe/pParam->B4SOIk1ox; /* v3.0 */ if (selfheat) dT1_dT = dT0_dT/pParam->B4SOIk1ox; else dT1_dT = 0.0; /* new line Wagner */ } else { T1 = pParam->B4SOIk1ox/2*(-1 + sqrt(1 + 4*T0/pParam->B4SOIk1ox/pParam->B4SOIk1ox)); T2 = pParam->B4SOIk1ox/2 * 0.5/sqrt(1 + 4*T0/pParam->B4SOIk1ox/pParam->B4SOIk1ox) * 4/pParam->B4SOIk1ox/pParam->B4SOIk1ox; dT1_dVg = T2 * dT0_dVg; dT1_dVd = T2 * dT0_dVd; dT1_dVb = T2 * dT0_dVb; dT1_dVe = T2 * dT0_dVe; /* v3.0 */ if (selfheat) dT1_dT = T2 * dT0_dT; else dT1_dT = 0.0; /* new line Wagner */ } Voxdepinv = Vgs_eff - (T1*T1 + Vbs) - Vfb; /* LFW_FD add/fix 5 derivatives */ dVoxdepinv_dVg = dVgs_eff_dVg - (2.0*T1*dT1_dVg) - dVbs_dVg; dVoxdepinv_dVd = -(2.0*T1*dT1_dVd) - dVbs_dVd; dVoxdepinv_dVb = -(2.0*T1*dT1_dVb) - dVbs_dVb; dVoxdepinv_dVe = -(2.0*T1*dT1_dVe) - dVbs_dVe; if (selfheat) dVoxdepinv_dT = dVgs_eff_dT -(2.0*T1*dT1_dT) - dVbs_dT - dVfb_dT; else dVoxdepinv_dT = 0.0; } } /* gate-channel tunneling component */ /* LFW_FD next 6 lines - flexilint inits */ Igd = dIgd_dVg = dIgd_dVd = 0.0; Igcd = dIgcd_dVg = dIgcd_dVd = dIgcd_dVb = dIgcd_dVe = 0.0; Igs = dIgs_dVg = dIgs_dVs = 0.0; Igcs = dIgcs_dVg = dIgcs_dVd = dIgcs_dVb = dIgcs_dVe = 0.0; ExpVxNVt = 0.0; dIgcd_dT = dIgcs_dT = 0.0; if (model->B4SOIigcMod) { T0 = Vtm * pParam->B4SOInigc; /* 2 new lines Wagner */ if (selfheat) dT0_dT = pParam->B4SOInigc * dVtm_dT; else dT0_dT = 0.0; VxNVt = (Vgs_eff - model->B4SOItype * here->B4SOIvth0) / T0; /* Vth instead of Vth0 may be used */ /* 2 new lines Wagner */ if (selfheat) dVxNVt_dT = (dVgs_eff_dT - VxNVt * dT0_dT) /T0; else dVxNVt_dT = 0.0; if (VxNVt > EXPL_THRESHOLD) { 
Vaux = Vgs_eff - model->B4SOItype * here->B4SOIvth0; dVaux_dVg = dVgs_eff_dVg; dVaux_dVd = 0.0; dVaux_dVb = 0.0; /* 3 new lines Wagner */ if (selfheat) dVaux_dT = dVgs_eff_dT; else dVaux_dT = 0.0; } else if (VxNVt < -EXPL_THRESHOLD) { Vaux = T0 * log(1.0 + MIN_EXPL); dVaux_dVg = dVaux_dVd = dVaux_dVb = 0.0; /* 3 new lines Wagner */ if (selfheat) dVaux_dT = dT0_dT * log(1.0 + MIN_EXPL); else dVaux_dT = 0.0; } else { ExpVxNVt = exp(VxNVt); Vaux = T0 * log(1.0 + ExpVxNVt); dVaux_dVg = ExpVxNVt / (1.0 + ExpVxNVt); dVaux_dVd = -dVaux_dVg * 0.0; dVaux_dVb = -dVaux_dVg * 0.0; dVaux_dVg *= dVgs_eff_dVg; /* Wagner New fix (moved from below into else block */ if (selfheat) dVaux_dT = dT0_dT*log(1.0+ExpVxNVt) + T0*ExpVxNVt*dVxNVt_dT/(1.0+ExpVxNVt); else dVaux_dT = 0.0; } T2 = Vgs_eff * Vaux; dT2_dVg = dVgs_eff_dVg * Vaux + Vgs_eff * dVaux_dVg; dT2_dVd = Vgs_eff * dVaux_dVd; dT2_dVb = Vgs_eff * dVaux_dVb; /* 2 new lines Wagner */ if (selfheat) dT2_dT = dVgs_eff_dT * Vaux + Vgs_eff * dVaux_dT; else dT2_dT = 0.0; T11 = pParam->B4SOIAechvb; T12 = pParam->B4SOIBechvb; T3 = pParam->B4SOIaigc * pParam->B4SOIcigc - pParam->B4SOIbigc; T4 = pParam->B4SOIbigc * pParam->B4SOIcigc; T5 = T12 * (pParam->B4SOIaigc + T3 * Voxdepinv - T4 * Voxdepinv * Voxdepinv); /* LFW_FD fix derivative */ if (selfheat) dT5_dT = T12 * (T3 - 2 * T4 * Voxdepinv) * dVoxdepinv_dT; else dT5_dT = 0.0; if (T5 > EXPL_THRESHOLD) { T6 = MAX_EXPL; dT6_dVg = dT6_dVd = dT6_dVb = dT6_dVe = dT6_dT = 0.0; /* LFW_FD enhance line */ } else if (T5 < -EXPL_THRESHOLD) { T6 = MIN_EXPL; dT6_dVg = dT6_dVd = dT6_dVb = dT6_dVe = dT6_dT = 0.0; /* LFW_FD enhance line */ } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxdepinv); dT6_dVd = dT6_dVg * dVoxdepinv_dVd; dT6_dVe = dT6_dVg * dVoxdepinv_dVe; /* LFW_FD new line */ dT6_dVb = dT6_dVg * dVoxdepinv_dVb; dT6_dVg *= dVoxdepinv_dVg; /* LFW_FD fix - move from below into this else block */ if (selfheat) dT6_dT = T6 * dT5_dT; else dT6_dT = 0.0; } Igc = T11 * T2 * T6; dIgc_dVg 
= T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgc_dVd = T11 * (T2 * dT6_dVd + T6 * dT2_dVd); dIgc_dVe = T11 * (T2 * dT6_dVe); /* LFW_FD new line */ dIgc_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb); /* 3 new lines Wagner */ if (selfheat) dIgc_dT = T11 * T2 * dT6_dT + T11 * dT2_dT * T6; else dIgc_dT = 0.0; T7 = -pParam->B4SOIpigcd * Vds; T8 = T7 * T7 + 2.0e-4; dT8_dVd = -2.0 * pParam->B4SOIpigcd * T7; if (T7 > EXPL_THRESHOLD) { T9 = MAX_EXPL; dT9_dVd = 0.0; } else if (T7 < -EXPL_THRESHOLD) { T9 = MIN_EXPL; dT9_dVd = 0.0; } else { T9 = exp(T7); dT9_dVd = -T9 * pParam->B4SOIpigcd; } T0 = T8 * T8; T1 = T9 - 1.0 + 1.0e-4; T10 = (T1 - T7) / T8; dT10_dVd = ((pParam->B4SOIpigcd + dT9_dVd) * T8 - (T1 - T7) * dT8_dVd) / T0; Igcs = Igc * T10; dIgcs_dVg = dIgc_dVg * T10; dIgcs_dVd = dIgc_dVd * T10 + Igc * dT10_dVd; dIgcs_dVb = dIgc_dVb * T10; dIgcs_dVe = dIgc_dVe * T10; /* LFW_FD new line */ /* 3 new lines Wagner */ if (selfheat) dIgcs_dT = dIgc_dT * T10; else dIgcs_dT = 0.0; T1 = T9 - 1.0 - 1.0e-4; T10 = (T7 * T9 - T1) / T8; dT10_dVd = (-pParam->B4SOIpigcd * T9 + (T7 - 1.0) * dT9_dVd - T10 * dT8_dVd) / T8; Igcd = Igc * T10; dIgcd_dVg = dIgc_dVg * T10; dIgcd_dVd = dIgc_dVd * T10 + Igc * dT10_dVd; dIgcd_dVb = dIgc_dVb * T10; dIgcd_dVe = dIgc_dVe * T10; /* LFW_FD new line */ /* 3 new lines Wagner */ if (selfheat) dIgcd_dT = dIgc_dT * T10; else dIgcd_dT = 0.0; here->B4SOIIgcs = Igcs; here->B4SOIgIgcsg = dIgcs_dVg; here->B4SOIgIgcsd = dIgcs_dVd; /* fix below expression Wagner */ /*here->B4SOIgIgcsb = dIgcs_dVb * dVbseff_dVb;*/ here->B4SOIgIgcsb = dIgcs_dVb; here->B4SOIgIgcse = dIgcs_dVe; /* LFW_FD new line */ here->B4SOIIgcd = Igcd; here->B4SOIgIgcdg = dIgcd_dVg; here->B4SOIgIgcdd = dIgcd_dVd; /* fix below expression Wagner */ /*here->B4SOIgIgcdb = dIgcd_dVb * dVbseff_dVb;*/ here->B4SOIgIgcdb = dIgcd_dVb; here->B4SOIgIgcde = dIgcd_dVe; /* LFW_FD new line */ T0 = vgs - pParam->B4SOIvfbsd; vgs_eff = sqrt(T0 * T0 + 1.0e-4); dvgs_eff_dvg = T0 / vgs_eff; T2 = vgs * vgs_eff; dT2_dVg = vgs * 
dvgs_eff_dvg + vgs_eff; /* T11 = pParam->B4SOIAechvbEdge; */ T13 = pParam->B4SOIAechvbEdges; T14 = pParam->B4SOIAechvbEdged; T12 = pParam->B4SOIBechvbEdge; T3 = pParam->B4SOIaigsd * pParam->B4SOIcigsd - pParam->B4SOIbigsd; T4 = pParam->B4SOIbigsd * pParam->B4SOIcigsd; T5 = T12 * (pParam->B4SOIaigsd + T3 * vgs_eff - T4 * vgs_eff * vgs_eff); if (T5 > EXPL_THRESHOLD) { T6 = MAX_EXPL; dT6_dVg = 0.0; } else if (T5 < -EXPL_THRESHOLD) { T6 = MIN_EXPL; dT6_dVg = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgs_eff) * dvgs_eff_dvg; } /* Igs = T11 * T2 * T6; */ Igs = T13 * T2 * T6; dIgs_dVg = T13 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgs_dVs = -dIgs_dVg; T0 = vgd - pParam->B4SOIvfbsd; vgd_eff = sqrt(T0 * T0 + 1.0e-4); dvgd_eff_dvg = T0 / vgd_eff; T2 = vgd * vgd_eff; dT2_dVg = vgd * dvgd_eff_dvg + vgd_eff; T5 = T12 * (pParam->B4SOIaigsd + T3 * vgd_eff - T4 * vgd_eff * vgd_eff); if (T5 > EXPL_THRESHOLD) { T6 = MAX_EXPL; dT6_dVg = 0.0; } else if (T5 < -EXPL_THRESHOLD) { T6 = MIN_EXPL; dT6_dVg = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgd_eff) * dvgd_eff_dvg; } /* Igd = T11 * T2 * T6; */ Igd = T14 * T2 * T6; dIgd_dVg = T14 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgd_dVd = -dIgd_dVg; here->B4SOIIgs = Igs; here->B4SOIgIgsg = dIgs_dVg; here->B4SOIgIgss = dIgs_dVs; here->B4SOIIgd = Igd; here->B4SOIgIgdg = dIgd_dVg; here->B4SOIgIgdd = dIgd_dVd; } else { here->B4SOIIgcs = here->B4SOIgIgcsg = here->B4SOIgIgcsd = here->B4SOIgIgcsb = 0.0; here->B4SOIIgcd = here->B4SOIgIgcdg = here->B4SOIgIgcdd = here->B4SOIgIgcdb = 0.0; here->B4SOIIgs = here->B4SOIgIgsg = here->B4SOIgIgss = 0.0; here->B4SOIIgd = here->B4SOIgIgdg = here->B4SOIgIgdd = 0.0; } here->B4SOIgIgcss = -(here->B4SOIgIgcsg + here->B4SOIgIgcsd + here->B4SOIgIgcsb + here->B4SOIgIgcse); /* LFW_FD fix line */ here->B4SOIgIgcds = -(here->B4SOIgIgcdg + here->B4SOIgIgcdd + here->B4SOIgIgcdb + here->B4SOIgIgcde); /* LFW_FD fix line */ Vfb2 = dVox_dT = 0.0; /* gate-body tunneling component */ if 
((model->B4SOIigbMod!= 0) && (here->B4SOIsoiMod != 2)) /* v3.2 */ /* v3.1: the Igb calculation is skipped for the ideal FD mode */ { OxideRatio = pParam->B4SOIoxideRatio; Vox = Voxdepinv; /* Voxeff is Vox limited below Voxh */ T0 = model->B4SOIvoxh; T1 = T0 - Vox - model->B4SOIdeltavox; T3 = sqrt(T1 * T1 + 4*model->B4SOIdeltavox * T0); Voxeff = T0 - 0.5 * (T1 + T3); dVoxeff_dVox = 0.5 * (1.0 + T1 / T3); Vox = Voxeff; dVox_dVg = dVoxdepinv_dVg * dVoxeff_dVox; dVox_dVd = dVoxdepinv_dVd * dVoxeff_dVox; dVox_dVb = dVoxdepinv_dVb * dVoxeff_dVox; dVox_dVe = dVoxdepinv_dVe * dVoxeff_dVox; /* v3.0 */ if (selfheat) /* v4.2 Bug # 23 Jul09 */ dVox_dT = dVoxdepinv_dT * dVoxeff_dVox; T0 = (Vox - model->B4SOIebg)/model->B4SOIvevb; if (selfheat) dT0_dT = dVox_dT /model->B4SOIvevb; DEXP(T0, T1, T2); /* T1=exp(T0), T2=dT1_dT0 */ if (selfheat) dT1_dT = T2 * dT0_dT; Vaux = model->B4SOIvevb * log(1 + T1); dVaux_dVg = T2 / (1 + T1) * dVox_dVg; dVaux_dVd = T2 / (1 + T1) * dVox_dVd; dVaux_dVb = T2 / (1 + T1) * dVox_dVb; dVaux_dVe = T2 / (1 + T1) * dVox_dVe; /* v3.0 */ if (selfheat) dVaux_dT = T2 / (1 + T1) * dVox_dT; /* LFW_FD fix line */ else dVaux_dT = 0.0; if (model->B4SOIvgb1 != 0) { T0 = 1 - Vox / model->B4SOIvgb1; dT0_dVox = -1.0/model->B4SOIvgb1; if (selfheat) dT0_dT = -dVox_dT / model->B4SOIvgb1; } else { T0 = 1; dT0_dVox = dT0_dT = 0.0; } if (T0 < 0.01) { T0 = 0.01; dT0_dVox = dT0_dT = 0.0; } /* v2.2.3 bug fix */ /* T1 = (Leff * Weff / here->B4SOInseg + here->B4SOIagbcpd/here->B4SOInf) * 3.7622e-7 * OxideRatio; T2 = -3.1051e10 * model->B4SOItoxqm; */ T1 = (Leff * Weff / here->B4SOInseg + here->B4SOIagbcpd/here->B4SOInf) * agb1 * OxideRatio; /* bugfix v4.3.1 -Tanvir */ T2 = bgb1 * model->B4SOItoxqm; /* bugfix v4.3.1 -Tanvir */ T3 = pParam->B4SOIalphaGB1; T4 = pParam->B4SOIbetaGB1; T6 = T2*(T3 - T4 * Vox) / T0; if (selfheat) dT6_dT = -T2 * T4 * dVox_dT / T0 - T6/T0 * dT0_dT; else dT6_dT = 0.0; /* flexilint */ DEXP(T6, T5, T7); /* T5=exp(T6), T7=dT5_dT6 */ dT5_dVg = -T7 * dVox_dVg 
* T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVd = -T7 * dVox_dVd * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVb = -T7 * dVox_dVb * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVe = -T7 * dVox_dVe * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); /* v3.0 */ if (selfheat) dT5_dT = T7 * dT6_dT; else dT5_dT = 0.0; /* flexilint */ Igb1 = T1 * Vgb * Vaux * T5; /* LFW_FD fix 5 derivatives */ dIgb1_dVg = T1 * (Vgb*Vaux*dT5_dVg + dVgb_dVg*Vaux*T5 + Vgb*T5*dVaux_dVg) + Vgb * Vaux * T5 * Leff * dWeff_dVg * agb1 * OxideRatio / here->B4SOInseg; dIgb1_dVd = T1 * (Vgb*Vaux*dT5_dVd + Vgb*T5*dVaux_dVd + dVgb_dVd*Vaux*T5); dIgb1_dVb = T1 * (Vgb*Vaux*dT5_dVb + dVgb_dVb*Vaux*T5 + Vgb*T5*dVaux_dVb) + Vgb * Vaux * T5 * Leff * dWeff_dVb * agb1 * OxideRatio / here->B4SOInseg; dIgb1_dVe = T1 * (Vgb*Vaux*dT5_dVe + Vgb*T5*dVaux_dVe + dVgb_dVe*Vaux*T5); if (selfheat) dIgb1_dT = T1 * Vgb * (Vaux*dT5_dT + T5*dVaux_dT) + Vgb * Vaux * T5 * Leff * dWeff_dT * agb1 * OxideRatio / here->B4SOInseg + T1 * dVgs_eff_dT * Vaux * T5; else dIgb1_dT = 0.0; Vox = Voxacc; /* Voxeff is Vox limited below Voxh */ T0 = model->B4SOIvoxh; T1 = T0 - Vox - model->B4SOIdeltavox; T3 = sqrt(T1 * T1 + 4*model->B4SOIdeltavox * T0); Voxeff = T0 - 0.5 * (T1 + T3); dVoxeff_dVox = 0.5 * (1.0 + T1 / T3); Vox = Voxeff; dVox_dVg = dVoxacc_dVg * dVoxeff_dVox; dVox_dVd = dVoxacc_dVd * dVoxeff_dVox; dVox_dVe = dVoxacc_dVe * dVoxeff_dVox; /* LFW_FD new line */ dVox_dVb = dVoxacc_dVb * dVoxeff_dVox; /* fix below expression Wagner */ /*dVox_dT = 0;*/ dVox_dT = dVoxeff_dVox * dVoxacc_dT; T0 = (-Vgb+(Vfb))/model->B4SOIvecb; /* fix below expression Wagner */ /*if (selfheat) dT0_dT = 0;*/ if (selfheat) dT0_dT = dVfb_dT/model->B4SOIvecb; else dT0_dT = 0; DEXP(T0, T1, T2); /* T1=exp(T0), T2=dT1_dT0 */ /* fix below expression - Wagner */ /*if (selfheat) dT1_dT = 0;*/ if (selfheat) dT1_dT = T2 * dT0_dT; else dT1_dT = 0; Vaux = model->B4SOIvecb* log(1 + T1); /* LFW_FD fix/add 4 derivatives */ dVaux_dVg = - 
T2 / (1 + T1) * dVgb_dVg; dVaux_dVd = - T2 / (1 + T1) * dVgb_dVd; dVaux_dVe = - T2 / (1 + T1) * dVgb_dVe; dVaux_dVb = - T2 / (1 + T1) * dVgb_dVb; /* fix below expression - Wagner */ /*if (selfheat) dVaux_dT = 0;*/ if (selfheat) dVaux_dT = model->B4SOIvecb * dT1_dT / (1 + T1); else dVaux_dT = 0.0; if (model->B4SOIvgb2 != 0) { T0 = 1 - Vox / model->B4SOIvgb2; dT0_dVox = -1.0/model->B4SOIvgb2; if (selfheat) dT0_dT = -dVox_dT / model->B4SOIvgb2; } else { T0 = 1; dT0_dVox = dT0_dT =0.0; } if (T0 < 0.01) { T0 = 0.01; dT0_dVox = dT0_dT =0.0; } /* v2.2.3 bug fix */ /* T1 = (Leff * Weff / here->B4SOInseg + here->B4SOIagbcpd/here->B4SOInf) * 4.9758e-7 * OxideRatio; T2 = -2.357e10 * model->B4SOItoxqm; */ T1 = (Leff * Weff / here->B4SOInseg + here->B4SOIagbcpd/here->B4SOInf) * agb2 * OxideRatio; /* bugfix v4.3.1 -Tanvir */ T2 = bgb2 * model->B4SOItoxqm; /* bugfix v4.3.1 -Tanvir */ T3 = pParam->B4SOIalphaGB2; T4 = pParam->B4SOIbetaGB2; T6 = T2*(T3 - T4 * Vox) / T0; if (selfheat) dT6_dT = -T2 * T4 * dVox_dT / T0 - T6/T0 * dT0_dT; else dT6_dT = 0.0; /* flexilint */ DEXP(T6, T5, T7); /* T5=exp(T6), T7=dT5_dT6 */ dT5_dVg = -T7 * dVox_dVg * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVd = -T7 * dVox_dVd * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVb = -T7 * dVox_dVb * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); dT5_dVe = -T7 * dVox_dVe * T2 / T0 * (T4 + (T3 - T4 * Vox) / T0 * dT0_dVox); /* LFW_FD new line */ if (selfheat) dT5_dT = T7 * dT6_dT; else dT5_dT = 0.0; /* flexilint */ Igb2 = T1 * Vgb * Vaux * T5; /* LFW_FD fix 5 derivatives */ dIgb2_dVg = T1 * (Vgb*Vaux*dT5_dVg + dVgb_dVg*Vaux*T5 + Vgb*T5*dVaux_dVg) + Vgb * Vaux * T5 * Leff * dWeff_dVg *agb2 * OxideRatio / here->B4SOInseg; dIgb2_dVd = T1 * (Vgb*Vaux*dT5_dVd + dVgb_dVd*Vaux*T5 + Vgb*T5*dVaux_dVd); dIgb2_dVb = T1 * (Vgb*Vaux*dT5_dVb + dVgb_dVb*Vaux*T5 + Vgb*T5*dVaux_dVb) + Vgb * Vaux * T5 * Leff * dWeff_dVb * agb2 * OxideRatio / here->B4SOInseg; dIgb2_dVe = T1 * (Vgb*Vaux*dT5_dVe + 
dVgb_dVe*Vaux*T5 + Vgb*T5*dVaux_dVe); if (selfheat) dIgb2_dT = T1 * Vgb * (Vaux*dT5_dT + T5*dVaux_dT) + Vgb * Vaux * T5 * Leff * dWeff_dT * agb2 * OxideRatio / here->B4SOInseg + T1 * dVgs_eff_dT * Vaux * T5; else dIgb2_dT = 0.0; /* Igb1 dominates in inversion region, while Igb2 dominates in accumulation */ /* v2.2.3 bug fix for residue at low Vgb */ if (Vgb >= 0) { Igb = Igb1; dIgb_dVg = dIgb1_dVg; dIgb_dVd = dIgb1_dVd; dIgb_dVb = dIgb1_dVb; dIgb_dVe = dIgb1_dVe; /* v3.0 */ dIgb_dT = dIgb1_dT; } else { Igb = Igb2; dIgb_dVg = dIgb2_dVg; dIgb_dVd = dIgb2_dVd; dIgb_dVb = dIgb2_dVb; dIgb_dVe = dIgb2_dVe; /* LFW_FD fix line */ dIgb_dT = dIgb2_dT; } /*Vfb2 = Vfb + 1.12; Bug fix #18 Jul09*/ Vfb2 = Vfb + eggbcp2; /* bugfix 4.3.1 -Tanvir */ } else { Igb = 0.0; dIgb_dVg = 0.0; dIgb_dVd = 0.0; dIgb_dVb = 0.0; dIgb_dVe = 0.0; /* v3.0 */ dIgb_dT = 0.0; } here->B4SOIig = Igb; here->B4SOIgigg = dIgb_dVg; here->B4SOIgigd = dIgb_dVd; here->B4SOIgigb = dIgb_dVb; here->B4SOIgige = dIgb_dVe; /* v3.0 */ here->B4SOIgigs = -(dIgb_dVg + dIgb_dVd + dIgb_dVb + dIgb_dVe); /* v3.0 */ here->B4SOIgigT = dIgb_dT; /* v4.1 */ /* gate tunneling component in the AGBCP2 region */ /* Vfb2 = Vfb + 1.12; Bug fix #18 Jul09 Code moved to 4370 where Vfb definition is valid*/ if ((model->B4SOIigbMod!= 0) && (here->B4SOIsoiMod != 2) && (here->B4SOIbodyMod != 0) && (here->B4SOIagbcp2 > 0) && (vgp < Vfb2)) /* v4.1: the Igb2_agbcp2 calculation is skipped for the ideal FD mode or if there is no "p" node */ { /* Vfb, Vfb2 are taken as constants in derivative calculation for simplicity */ T0 = vgp - Vfb2; T1 = sqrt(T0 * T0 + 1.0e-4); vgp_eff = 0.5 * (-T0 + T1 - 1.0e-2); dvgp_eff_dvg = 0.5 * (-1.0 + T0 / T1); dvgp_eff_dvp = -dvgp_eff_dvg; dvgp_eff_dT = 0.5 * (1.0 - T0 / T1) * dVfb_dT; /* LFW_FD new line */ /* T11=A* T12=B* */ /*T11 = (model->B4SOItype == NMOS) ? 3.42537e-7 : 4.97232e-7; T12 = (model->B4SOItype == NMOS) ? 1.16645e12 : 7.45669e11; */ T11 = (model->B4SOItype == NMOS) ? 
agbc2n : agbc2p; /* bugfix 4.3.1 -Tanvir */ T12 = (model->B4SOItype == NMOS) ? bgbc2n : bgbc2p; /* bugfix 4.3.1 -Tanvir */ T2 = vgp * vgp_eff; dT2_dVg = vgp * dvgp_eff_dvg + vgp_eff; dT2_dVp = vgp * dvgp_eff_dvp - vgp_eff; dT2_dT = vgp * dvgp_eff_dT; /* LFW_FD new line */ T3 = pParam->B4SOIaigbcp2 * pParam->B4SOIcigbcp2 - pParam->B4SOIbigbcp2; T4 = pParam->B4SOIbigbcp2 * pParam->B4SOIcigbcp2; T5 = (-T12) * model->B4SOItoxqm * (pParam->B4SOIaigbcp2 + T3 * vgp_eff - T4 * vgp_eff * vgp_eff); if (T5 > EXPL_THRESHOLD) { T6 = MAX_EXPL; dT6_dVg = 0.0; dT6_dVp = 0.0; dT6_dT = 0.0; /* LFW_FD new line */ } else if (T5 < -EXPL_THRESHOLD) { T6 = MIN_EXPL; dT6_dVg = 0.0; dT6_dVp = 0.0; dT6_dT = 0.0; /* LFW_FD new line */ } else { T6 = exp(T5); T7 = T6 * (-T12) * model->B4SOItoxqm * (T3 - 2.0 * T4 * vgp_eff); dT6_dVg = T7 * dvgp_eff_dvg; dT6_dVp = T7 * dvgp_eff_dvg; dT6_dT = T7 * dvgp_eff_dT; /* LFW_FD new line */ } T11 = T11 * here->B4SOIagbcp2 * pParam->B4SOIoxideRatio; Ig_agbcp2 = T11 * T2 * T6; dIg_agbcp2_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIg_agbcp2_dVp = -dIg_agbcp2_dVg; dIg_agbcp2_dT = T11 * (T2 * dT6_dT + T6 * dT2_dT); /* LFW_FD new line */ } else { Ig_agbcp2 = 0.0; dIg_agbcp2_dVg = 0.0; dIg_agbcp2_dVp = 0.0; dIg_agbcp2_dT = 0.0; /* LFW_FD new line */ } here->B4SOIigp = Ig_agbcp2; here->B4SOIgigpg = dIg_agbcp2_dVg; here->B4SOIgigpp = dIg_agbcp2_dVp; /* end of gate-body tunneling */ /* end of v3.0 gate-tunneling */ /* v3.1 */ if (here->B4SOIsoiMod != 2) /* v3.2 */ { Idsmosfet = 0.0; Ratio = dRatio_dVg = dRatio_dVd = dRatio_dVb = dRatio_dVe = dRatio_dT = 0.0; if (model->B4SOIiiiMod == 0 ) { /* calculate substrate current Iii */ if (pParam->B4SOIalpha0 <= 0.0) { Giig = Giib = Giid = GiiT = 0.0; Giie = 0; /* v3.0 */ here->B4SOIiii = Iii = Idsmosfet = dIiibjt_dVb = dIiibjt_dVd = dIiibjt_dT = 0.0; } else { Vdsatii0 = pParam->B4SOIvdsatii0 * (1 + model->B4SOItii * (TempRatio-1.0)) - pParam->B4SOIlii / Leff; if (selfheat) dVdsatii0_dT = pParam->B4SOIvdsatii0 * 
model->B4SOItii * dTempRatio_dT; else dVdsatii0_dT = 0; /* Calculate VgsStep */ T0 = pParam->B4SOIesatii * Leff; /* v3.0 bug fix: T0 is dimentionless (i.e., scaled by 1V) */ T1 = pParam->B4SOIsii0 * T0 / (1.0 + T0); T0 = 1 / (1 + pParam->B4SOIsii1 * Vgsteff); if (selfheat) dT0_dT = - pParam->B4SOIsii1 * T0 * T0 *dVgsteff_dT; else dT0_dT = 0; T3 = T0 + pParam->B4SOIsii2; T4 = Vgst * pParam->B4SOIsii1 * T0 * T0; T2 = Vgst * T3; dT2_dVg = T3 * (dVgst_dVg - dVth_dVb * dVbseff_dVg) - T4 * dVgsteff_dVg; /* v3.0 */ dT2_dVb = T3 * dVgst_dVb * dVbseff_dVb - T4 * dVgsteff_dVb; dT2_dVe = T3 * dVgst_dVb * dVbseff_dVe - T4 * dVgsteff_dVe; /* v3.0 */ dT2_dVd = T3 * (dVgst_dVd - dVth_dVb * dVbseff_dVd) - T4 * dVgsteff_dVd; /* v3.0 */ if (selfheat) /* fix below expression Wagner */ /*dT2_dT = -(dVth_dT + dVth_dVb * dVbseff_dT) * T3 + Vgst * dT0_dT; v3.0 */ dT2_dT = (dVgst_dT ) * T3 + Vgst * dT0_dT; /* v3.0 */ else dT2_dT = 0; T3 = 1 / (1 + pParam->B4SOIsiid * Vds); dT3_dVd = - pParam->B4SOIsiid * T3 * T3; VgsStep = T1 * T2 * T3; if (selfheat) dVgsStep_dT = T1 * T3 * dT2_dT; else dVgsStep_dT = 0; Vdsatii = Vdsatii0 + VgsStep; Vdiff = Vds - Vdsatii; dVdiff_dVg = - T1 * T3 * dT2_dVg; dVdiff_dVb = - T1 * T3 * dT2_dVb; dVdiff_dVe = - T1 * T3 * dT2_dVe; /* v3.0 */ dVdiff_dVd = 1.0 - T1 * (T3 * dT2_dVd + T2 * dT3_dVd); if (selfheat) dVdiff_dT = -(dVdsatii0_dT + dVgsStep_dT); else dVdiff_dT = 0; T0 = pParam->B4SOIbeta2 + pParam->B4SOIbeta1 * Vdiff + pParam->B4SOIbeta0 * Vdiff * Vdiff; if (T0 < 1e-5) { T0 = 1e-5; dT0_dVg = dT0_dVd = dT0_dVb = dT0_dT = 0.0; dT0_dVe = 0; /* v3.0 */ } else { T1 = pParam->B4SOIbeta1 + 2 * pParam->B4SOIbeta0 * Vdiff; dT0_dVg = T1 * dVdiff_dVg; dT0_dVb = T1 * dVdiff_dVb; dT0_dVd = T1 * dVdiff_dVd; dT0_dVe = T1 * dVdiff_dVe; /* v3.0 */ if (selfheat) dT0_dT = T1 * dVdiff_dT; else dT0_dT = 0; } if ((T0 < Vdiff / EXPL_THRESHOLD) && (Vdiff > 0.0)) { Ratio = pParam->B4SOIalpha0 * MAX_EXPL; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 
*/ } else if ((T0 < -Vdiff / EXPL_THRESHOLD) && (Vdiff < 0.0)) { Ratio = pParam->B4SOIalpha0 * MIN_EXPL; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 */ } else { Ratio = pParam->B4SOIalpha0 * exp(Vdiff / T0); T1 = Ratio / T0 / T0; dRatio_dVg = T1 * (T0 * dVdiff_dVg - Vdiff * dT0_dVg); dRatio_dVb = T1 * (T0 * dVdiff_dVb - Vdiff * dT0_dVb); dRatio_dVd = T1 * (T0 * dVdiff_dVd - Vdiff * dT0_dVd); /* v3.0 */ dRatio_dVe = T1 * (T0 * dVdiff_dVe - Vdiff * dT0_dVe); if (selfheat) dRatio_dT = T1 * (T0 * dVdiff_dT - Vdiff * dT0_dT); else dRatio_dT = 0; } /* Avoid too high ratio */ if (Ratio > 10.0) { Ratio = 10.0; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 */ } T0 = Ids + pParam->B4SOIfbjtii * Ic; here->B4SOIiii = Iii = Ratio * T0; Giig = Ratio * Gm + T0 * dRatio_dVg; Giib = Ratio * (Gmb + pParam->B4SOIfbjtii * Gcb) + T0 * dRatio_dVb; Giid = Ratio * (Gds + pParam->B4SOIfbjtii * Gcd) + T0 * dRatio_dVd; /* v3.0 */ Giie = Ratio * Gme + T0 * dRatio_dVe; if (selfheat) GiiT = Ratio * (GmT + pParam->B4SOIfbjtii * GcT) + T0 * dRatio_dT; else GiiT = 0.0; } } else /*new Iii model*/ { /*Idsmosfet part*/ if (pParam->B4SOIalpha0 <= 0.0) { /* Giig = Giib = Giid = GiiT = 0.0; */ Giie = 0; /* v3.0 */ /* here->B4SOIiii = Iii = 0.0; v4.2 bugfix #38 */ /* Idsmosfet = 0.0; v4.2 bugfix #38 */ /*dIiibjt_dVb = 0.0; v4.2 bugfix #38 */ /*dIiibjt_dVd = 0.0; */ /*dIiibjt_dT = 0.0; */ Ratio = 0; /* v4.2 bugfix # 38 */ } else { Vdsatii0 = pParam->B4SOIvdsatii0 * (1 + model->B4SOItii * (TempRatio-1.0)) - pParam->B4SOIlii / Leff; if (selfheat) dVdsatii0_dT = pParam->B4SOIvdsatii0 * model->B4SOItii * dTempRatio_dT; else dVdsatii0_dT = 0; /* Calculate VgsStep */ T0 = pParam->B4SOIesatii * Leff; /* v3.0 bug fix: T0 is dimensionless (i.e., scaled by 1V) */ T1 = pParam->B4SOIsii0 * T0 / (1.0 + T0); T0 = 1 / (1 + pParam->B4SOIsii1 * Vgsteff); if (selfheat) dT0_dT = - pParam->B4SOIsii1 * T0 * T0 *dVgsteff_dT; else dT0_dT = 0; T3 = T0 + 
pParam->B4SOIsii2; T4 = Vgst * pParam->B4SOIsii1 * T0 * T0; T2 = Vgst * T3; dT2_dVg = T3 * (dVgst_dVg - dVth_dVb * dVbseff_dVg) - T4 * dVgsteff_dVg; /* v3.0 */ dT2_dVb = T3 * dVgst_dVb * dVbseff_dVb - T4 * dVgsteff_dVb; dT2_dVe = T3 * dVgst_dVb * dVbseff_dVe - T4 * dVgsteff_dVe; /* v3.0 */ dT2_dVd = T3 * (dVgst_dVd - dVth_dVb * dVbseff_dVd) - T4 * dVgsteff_dVd; /* v3.0 */ if (selfheat) /* fix below expression Wagner */ /*dT2_dT = -(dVth_dT + dVth_dVb * dVbseff_dT) * T3 + Vgst * dT0_dT; v3.0 */ dT2_dT = (dVgst_dT ) * T3 + Vgst * dT0_dT; /* v3.0 */ else dT2_dT = 0; T3 = 1 / (1 + pParam->B4SOIsiid * Vds); dT3_dVd = - pParam->B4SOIsiid * T3 * T3; VgsStep = T1 * T2 * T3; if (selfheat) dVgsStep_dT = T1 * T3 * dT2_dT; else dVgsStep_dT = 0; Vdsatii = Vdsatii0 + VgsStep; Vdiff = Vds - Vdsatii; dVdiff_dVg = - T1 * T3 * dT2_dVg; dVdiff_dVb = - T1 * T3 * dT2_dVb; dVdiff_dVe = - T1 * T3 * dT2_dVe; /* v3.0 */ dVdiff_dVd = 1.0 - T1 * (T3 * dT2_dVd + T2 * dT3_dVd); if (selfheat) dVdiff_dT = -(dVdsatii0_dT + dVgsStep_dT); else dVdiff_dT = 0; T0 = pParam->B4SOIbeta2 + pParam->B4SOIbeta1 * Vdiff + pParam->B4SOIbeta0 * Vdiff * Vdiff; if (T0 < 1e-5) { T0 = 1e-5; dT0_dVg = dT0_dVd = dT0_dVb = dT0_dT = 0.0; dT0_dVe = 0; /* v3.0 */ } else { T1 = pParam->B4SOIbeta1 + 2 * pParam->B4SOIbeta0 * Vdiff; dT0_dVg = T1 * dVdiff_dVg; dT0_dVb = T1 * dVdiff_dVb; dT0_dVd = T1 * dVdiff_dVd; dT0_dVe = T1 * dVdiff_dVe; /* v3.0 */ if (selfheat) dT0_dT = T1 * dVdiff_dT; else dT0_dT = 0; } if ((T0 < Vdiff / EXPL_THRESHOLD) && (Vdiff > 0.0)) { Ratio = pParam->B4SOIalpha0 * MAX_EXPL; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 */ } else if ((T0 < -Vdiff / EXPL_THRESHOLD) && (Vdiff < 0.0)) { Ratio = pParam->B4SOIalpha0 * MIN_EXPL; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 */ } else { Ratio = pParam->B4SOIalpha0 * exp(Vdiff / T0); T1 = Ratio / T0 / T0; dRatio_dVg = T1 * (T0 * dVdiff_dVg - Vdiff * dT0_dVg); dRatio_dVb = T1 * (T0 * 
dVdiff_dVb - Vdiff * dT0_dVb); dRatio_dVd = T1 * (T0 * dVdiff_dVd - Vdiff * dT0_dVd); /* v3.0 */ dRatio_dVe = T1 * (T0 * dVdiff_dVe - Vdiff * dT0_dVe); if (selfheat) dRatio_dT = T1 * (T0 * dVdiff_dT - Vdiff * dT0_dT); else dRatio_dT = 0; } /* Avoid too high ratio */ if (Ratio > 10.0) { Ratio = 10.0; dRatio_dVg = dRatio_dVb = dRatio_dVd = dRatio_dT = 0.0; dRatio_dVe = 0; /* v3.0 */ } T0 = Ids; Idsmosfet = Ratio * T0; } /*New BJT part*/ T0 = (pParam->B4SOIcbjtii + pParam->B4SOIebjtii * Leff)/Leff; Vbci= pParam->B4SOIvbci*(1.0+model->B4SOItvbci*(TempRatio-1.0)); /*T1 = Vbci - (Vbs - Vds); v4.3 bugfix*/ T1 = Vbci - (vbs_jct - Vds); T2 = pParam->B4SOImbjtii -1.0; /* if(T1 == 0.0) T3 =1.0; else T3 = -pParam->B4SOIabjtii * pow(T1,T2); */ if(T1<=0.0) T3 = 0.0; else T3 = -pParam->B4SOIabjtii * pow(T1,T2); if (T3> EXPL_THRESHOLD) T4 = MAX_EXPL; else if (T3 < -EXPL_THRESHOLD) T4 = MIN_EXPL; else T4 = exp(T3); if (T1==0.0) {if(T3> EXPL_THRESHOLD) { dT4_dVd = 0.0; dT4_dVb = 0.0; } else if (T3 < -EXPL_THRESHOLD) { dT4_dVd = 0.0; dT4_dVb = 0.0; } else { dT4_dVd = - T4 * pParam->B4SOIabjtii* T2 ; dT4_dVb = T4 * pParam->B4SOIabjtii* T2 ; } } else { if(T3> EXPL_THRESHOLD) { dT4_dVd = 0.0; dT4_dVb = 0.0; } else if (T3 < -EXPL_THRESHOLD) { dT4_dVd = 0.0; dT4_dVb = 0.0; } else {T5 = T2-1.0; if (T1<=0.0) { dT4_dVd = 0.0; dT4_dVb = 0.0; } else { dT4_dVd = - T4 * pParam->B4SOIabjtii* T2 * pow(T1,T5); dT4_dVb = T4 * pParam->B4SOIabjtii* T2 * pow(T1,T5); } } } Iiibjt = T0 * Ic * T1 * T4; if (selfheat) {T5= T2-1.0; dVbci_dT = pParam->B4SOIvbci * model->B4SOItvbci *model->B4SOItnom; if(T1<=0.0) dT4_dT = 0.0; else dT4_dT = -T4 * pParam->B4SOIabjtii* T2 * pow(T1,T5)*dVbci_dT; dIiibjt_dT = T0 * Ic * T4 * dVbci_dT + T0 *Ic *T1 * dT4_dT + T0 * GcT *T1 * T4; /* Samuel Mertens */ } else { dVbci_dT = 0.0; dT4_dT =0.0; dIiibjt_dT = 0.0; } /* Xue fix 10/29/2009 */ dIiibjt_dVd = T0 * Ic *T4 + T0 *Ic *T1*dT4_dVd + T0 * dIc_dVd * T1 * T4; dIiibjt_dVb = -T0 * Ic *T4 + T0*Ic*T1*dT4_dVb + T0 * dIc_dVb * T1 * 
T4; /*Total Iii*/ T0 = Ids; here->B4SOIiii = Iii = Idsmosfet + Iiibjt; Giig = Ratio * Gm + T0 * dRatio_dVg; Giib = Ratio * Gmb + T0 * dRatio_dVb + dIiibjt_dVb; Giid = Ratio * Gds + T0 * dRatio_dVd + dIiibjt_dVd; Giie = Ratio * Gme + T0 * dRatio_dVe; if (selfheat) GiiT = Ratio * GmT + T0 * dRatio_dT + dIiibjt_dT ; else GiiT = 0.0; } /* Current through body resistor */ /* Current going out is +ve */ if ((here->B4SOIbodyMod == 0) || (here->B4SOIbodyMod == 2)) { Ibp = Gbpbs = Gbpps = 0.0; } else { /* here->B4SOIbodyMod == 1 */ if (pParam->B4SOIrbody < 1e-3) /* 3.2 bug fix */ { if (here->B4SOIrbodyext <= 1e-3) /* 3.2 bug fix */ T0 = 1.0 / 1e-3; /* 3.2 bug fix */ else T0 = 1.0 / here->B4SOIrbodyext; Ibp = Vbp * T0; Gbpbs = T0 * dVbp_dVb; Gbpps = -T0 * dVbp_dVb; } else { Gbpbs = 1.0 / (pParam->B4SOIrbody + here->B4SOIrbodyext); Ibp = Vbp * Gbpbs; Gbpps = - Gbpbs; } } here->B4SOIibp = Ibp; here->B4SOIgbpbs = Gbpbs; here->B4SOIgbpps = Gbpps; here->B4SOIgbpT = 0.0; here->B4SOIcbodcon = (Ibp - (Gbpbs * Vbs + Gbpps * Vps)); } else /* v3.1 soiMod=2: ideal FD */ { Giig = Giib = Giid = Giie = GiiT = 0.0; here->B4SOIiii = Iii = 0.0; here->B4SOIibp = Ibp = 0.0; here->B4SOIgbpbs = 0.0; here->B4SOIgbpps = here->B4SOIgbpT = here->B4SOIcbodcon = 0.0; Gbpbs = Gbpps = 0.0; } /* v3.1 */ /* Current going out of drainprime node into the drain of device */ /* "node" means the SPICE circuit node */ here->B4SOIcdrain = Ids + Ic; here->B4SOIcd = Ids + Ic - Ibd + Iii + Igidl; here->B4SOIcb = Ibs + Ibd + Ibp / here->B4SOInf - Iii - Igidl - Igisl - Igb; /* v4.2 bug fix # 27*/ here->B4SOIgds = Gds + Gcd; here->B4SOIgm = Gm; here->B4SOIgmbs = Gmb + Gcb; /* v3.0 */ here->B4SOIgme = Gme; /* v3.1 for RF */ /* Calculate Rg */ if (here->B4SOIrgateMod >1) { T9 = pParam->B4SOIxrcrg2 * model->B4SOIvtm; T0 = T9 *beta; dT0_dVd = (dbeta_dVd + dbeta_dVg * dVgsteff_dVd) * T9; dT0_dVb = (dbeta_dVb + dbeta_dVg * dVgsteff_dVb) * T9; dT0_dVg = dbeta_dVg * T9; T1 = 1 + gche * Rds; T2 = 1 / T1; here->B4SOIgcrg = 
pParam->B4SOIxrcrg1 * (T0 + here->B4SOIidovVds); dIdlovVdseff_dVg = (T2 * dgche_dVg - IdlovVdseff * gche * dRds_dVg) / T1; dIdlovVdseff_dVd = T2 * dgche_dVd / T1; dIdlovVdseff_dVb = (T2 * dgche_dVb - IdlovVdseff * gche * dRds_dVb) / T1; T9 = diffVds / Va; T3 = 1.0 + T9; T4 = T3 * dIdlovVdseff_dVg - IdlovVdseff * (dVdseff_dVg + T9 * dVa_dVg) / Va; T5 = T3 * dIdlovVdseff_dVd + IdlovVdseff * (1.0 - dVdseff_dVd - T9 * dVa_dVd) / Va; T6 = T3 * dIdlovVdseff_dVb - IdlovVdseff * (dVdseff_dVb + T9 * dVa_dVb) / Va; tmp1 = (T4 * dVgsteff_dVd + T6 * dVbseff_dVd + T5) / here->B4SOInseg; tmp2 = (T4 * dVgsteff_dVg + T6 * dVbseff_dVg) / here->B4SOInseg; tmp3 = (T4 * dVgsteff_dVb + T6 * dVbseff_dVb) / here->B4SOInseg; here->B4SOIgcrgd = pParam->B4SOIxrcrg1 * (dT0_dVd +tmp1); here->B4SOIgcrgg = pParam->B4SOIxrcrg1 * (dT0_dVg * dVgsteff_dVg + tmp2); here->B4SOIgcrgb = pParam->B4SOIxrcrg1 * (dT0_dVb * dVbseff_dVb + tmp3); if (here->B4SOInf != 1.0) { here->B4SOIgcrg *= here->B4SOInf; here->B4SOIgcrgg *= here->B4SOInf; here->B4SOIgcrgd *= here->B4SOInf; here->B4SOIgcrgb *= here->B4SOInf; } if (here->B4SOIrgateMod == 2) { T10 = here->B4SOIgrgeltd * here->B4SOIgrgeltd; T11 = here->B4SOIgrgeltd + here->B4SOIgcrg; here->B4SOIgcrg = here->B4SOIgrgeltd * here->B4SOIgcrg / T11; T12 = T10 / T11 /T11; here->B4SOIgcrgg *= T12; here->B4SOIgcrgd *= T12; here->B4SOIgcrgb *= T12; } here->B4SOIgcrgs = -(here->B4SOIgcrgg + here->B4SOIgcrgd + here->B4SOIgcrgb); } /* v3.1 added Rg for RF end */ /* v4.0 Calculate bias-dependent external S/D resistance */ Rs = Rd = 0.0; /* flexilint */ if (model->B4SOIrdsMod) { /* Rs(V) */ T0 = vgs - pParam->B4SOIvfbsd; T1 = sqrt(T0 * T0 + 1.0e-4); vgs_eff = 0.5 * (T0 + T1); dvgs_eff_dvg = vgs_eff / T1; T0 = 1.0 + pParam->B4SOIprwg * vgs_eff; dT0_dVg = -pParam->B4SOIprwg / T0 / T0 * dvgs_eff_dvg; T1 = -pParam->B4SOIprwb * vbs; dT1_dVb = -pParam->B4SOIprwb; T2 = 1.0 / T0 + T1; T3 = T2 + sqrt(T2 * T2 + 0.01); dT3_dVg = T3 / (T3 - T2); dT3_dVb = dT3_dVg * dT1_dVb; dT3_dVg *= 
dT0_dVg; T4 = rs0 * 0.5; Rs = rswmin + T3 * T4; dRs_dVg = T4 * dT3_dVg; dRs_dVb = T4 * dT3_dVb; T0 = 1.0 + here->B4SOIsourceConductance * Rs; here->B4SOIgstot = here->B4SOIsourceConductance / T0; T0 = -here->B4SOIgstot * here->B4SOIgstot; dgstot_dvd = 0.0; /* place holder */ dgstot_dve = 0.0; /* place holder */ dgstot_dvg = T0 * dRs_dVg; dgstot_dvb = T0 * dRs_dVb; dgstot_dvs = -(dgstot_dvg + dgstot_dvb + dgstot_dvd + dgstot_dve); if (selfheat) { dRs_dT = drswmin_dT + T3 * 0.5 * drs0_dT; dgstot_dT = T0 * dRs_dT; } else dRs_dT = dgstot_dT = 0.0; /* Rd(V) */ T0 = vgd - pParam->B4SOIvfbsd; T1 = sqrt(T0 * T0 + 1.0e-4); vgd_eff = 0.5 * (T0 + T1); dvgd_eff_dvg = vgd_eff / T1; T0 = 1.0 + pParam->B4SOIprwg * vgd_eff; dT0_dVg = -pParam->B4SOIprwg / T0 / T0 * dvgd_eff_dvg; T1 = -pParam->B4SOIprwb * vbd; dT1_dVb = -pParam->B4SOIprwb; T2 = 1.0 / T0 + T1; T3 = T2 + sqrt(T2 * T2 + 0.01); dT3_dVg = T3 / (T3 - T2); dT3_dVb = dT3_dVg * dT1_dVb; dT3_dVg *= dT0_dVg; /*T4 = pParam->B4SOIrd0 * 0.5;*/ /* v4.2 bugfix # 37 */ /*Rd = pParam->B4SOIrdwmin + T3 * T4;*/ /* v4.2 bugfix # 37 */ T4 = rd0 * 0.5; Rd = rdwmin + T3 * T4; dRd_dVg = T4 * dT3_dVg; dRd_dVb = T4 * dT3_dVb; T0 = 1.0 + here->B4SOIdrainConductance * Rd; here->B4SOIgdtot = here->B4SOIdrainConductance / T0; T0 = -here->B4SOIgdtot * here->B4SOIgdtot; dgdtot_dvs = 0.0; dgdtot_dve = 0.0; dgdtot_dvg = T0 * dRd_dVg; dgdtot_dvb = T0 * dRd_dVb; dgdtot_dvd = -(dgdtot_dvg + dgdtot_dvb + dgdtot_dvs + dgdtot_dve); if (selfheat) { dRd_dT = drdwmin_dT + T3 * 0.5 * drd0_dT; dgdtot_dT = T0 * dRd_dT; } else dRd_dT = dgdtot_dT = 0.0; here->B4SOIgstotd = vses * dgstot_dvd; here->B4SOIgstotg = vses * dgstot_dvg; here->B4SOIgstots = vses * dgstot_dvs; here->B4SOIgstotb = vses * dgstot_dvb; T2 = vdes - vds; here->B4SOIgdtotd = T2 * dgdtot_dvd; here->B4SOIgdtotg = T2 * dgdtot_dvg; here->B4SOIgdtots = T2 * dgdtot_dvs; here->B4SOIgdtotb = T2 * dgdtot_dvb; } else { here->B4SOIgstot = here->B4SOIgstotd = here->B4SOIgstotg = here->B4SOIgstots = 
here->B4SOIgstotb = 0.0; here->B4SOIgdtot = here->B4SOIgdtotd = here->B4SOIgdtotg = here->B4SOIgdtots = here->B4SOIgdtotb = 0.0; } if (selfheat) here->B4SOIgmT = GmT + GcT; else here->B4SOIgmT = 0.0; /* note that sign is switched because power flows out of device into the temperature node. Currently omit self-heating due to bipolar current because it can cause convergence problem*/ here->B4SOIgtempg = -model->B4SOItype*Gm * Vds; here->B4SOIgtempb = -model->B4SOItype*Gmb * Vds; /* v3.0 */ here->B4SOIgtempe = -model->B4SOItype*Gme * Vds; here->B4SOIgtempT = -GmT * Vds; here->B4SOIgtempd = -model->B4SOItype* (Gds * Vds + Ids); here->B4SOIcth = - Ids * Vds - model->B4SOItype * (here->B4SOIgtempg * Vgs + here->B4SOIgtempb * Vbs + here->B4SOIgtempe * Ves + here->B4SOIgtempd * Vds) - here->B4SOIgtempT * delTemp; /* v3.0 */ /* Body current which flows into drainprime node from the drain of device */ here->B4SOIgjdb = Gjdb - Giib -Ggidlb - Ggislb; /* v4.0 */ here->B4SOIgjdd = Gjdd - (Giid + Ggidld); here->B4SOIgjdg = - (Giig + Ggidlg + Ggislg); here->B4SOIgjde = - Giie; if (selfheat) here->B4SOIgjdT = GjdT - GiiT; else here->B4SOIgjdT = 0.0; here->B4SOIcjd = Ibd - Iii - Igidl - (here->B4SOIgjdb * Vbs + here->B4SOIgjdd * Vds + here->B4SOIgjdg * Vgs + here->B4SOIgjde * Ves + here->B4SOIgjdT * delTemp); /* v3.0 */ if (!here->B4SOIrbodyMod) { Giigidl_b = Giigidl_d = Giigidl_g = Giigidl_e = Giigidl_T = Iii_Igidl = 0.0; } else { here->B4SOIgiigidlb = Giib + Ggidlb + Ggislb; here->B4SOIgiigidld = Giid + Ggidld; Giigidl_b = - Giib -Ggidlb - Ggislb; Giigidl_d = - Giid -Ggidld; Giigidl_g = - Giig -Ggidlg - Ggislg; Giigidl_e = - Giie; if (selfheat) Giigidl_T = -GiiT; else GiiT = Giigidl_T = 0.0; /*Idbdp = Ibd - ( Gjdb * vbs_jct + Gjdd * Vds + GjdT * delTemp); v4.2 bugfix */ Idbdp = Ibd - ( Gjdb * vbd_jct + Gjdd * Vds + GjdT * delTemp); /* Iii_Igidl = - Iii - Igidl + Giigidl_b * Vbs + Giigidl_d * Vds + Giigidl_g * Vgs + Giigidl_e * Ves + Giigidl_T * delTemp ; */ } /* Body current which 
flows into sourceprime node from the source of device */ here->B4SOIgjsg = 0.0; here->B4SOIgjsd = Gjsd; here->B4SOIgjsb = Gjsb; /* v4.0 */ if (selfheat) here->B4SOIgjsT = GjsT; else here->B4SOIgjsT = 0.0; here->B4SOIcjs = Ibs - Igisl -( here->B4SOIgjsb * Vbs + here->B4SOIgjsd * Vds + here->B4SOIgjsg * Vgs + here->B4SOIgjsT * delTemp); if (here->B4SOIrbodyMod) { Isbsp = Ibs - ( Gjsb * vbs_jct + Gjsd * Vds + GjsT * delTemp ); } /* Current flowing into body node */ here->B4SOIgbbs = Giib - Gjsb - Gjdb - Gbpbs / here->B4SOInf; /* v4.2 bug fix #27 */ here->B4SOIgbgs = Giig + Ggidlg + Ggislg; here->B4SOIgbds = Giid + Ggidld + Ggisls - Gjsd - Gjdd; here->B4SOIgbes = Giie; here->B4SOIgbps = - Gbpps / here->B4SOInf; /* v4.2 bug fix #27 */ if (selfheat) here->B4SOIgbT = GiiT - GjsT - GjdT; else here->B4SOIgbT = 0.0; if (!here->B4SOIrbodyMod) { here->B4SOIcbody = Iii + Igidl + Igisl - Ibs - Ibd - Ibp / here->B4SOInf + Igb /* v4.2 bug fix #27 */ - ( (here->B4SOIgbbs + dIgb_dVb) * Vbs + (here->B4SOIgbgs + dIgb_dVg) * Vgs + (here->B4SOIgbds + dIgb_dVd) * Vds + here->B4SOIgbps * Vps + (here->B4SOIgbes + dIgb_dVe) * Ves + (here->B4SOIgbT + dIgb_dT) * delTemp); } if (here->B4SOIrbodyMod) { here->B4SOIgbgiigbpb = Giib - Gbpbs / here->B4SOInf; /* v4.3 bug fix */ here->B4SOIcbody = Iii + Igidl + Igisl - Ibp / here->B4SOInf + Igb /* v4.2 bug fix #27 */ - ( (Giib - Gbpbs / here->B4SOInf + dIgb_dVb) * Vbs /* v4.2 bug fix #27 */ + (here->B4SOIgbgs + dIgb_dVg) * Vgs + (Giid + Ggidld + dIgb_dVd) * Vds + here->B4SOIgbps * Vps + (here->B4SOIgbes + dIgb_dVe) * Ves + (GiiT + dIgb_dT) * delTemp ); } here->B4SOIcgate = Igb - (dIgb_dVb * Vbs + dIgb_dVe * Ves + dIgb_dVg * Vgs + dIgb_dVd * Vds + dIgb_dT * delTemp); /* v3.0 */ /* Calculate Qinv for Noise analysis */ T1 = Vgsteff * (1.0 - 0.5 * Abulk * Vdseff / Vgst2Vtm); here->B4SOIqinv = -model->B4SOIcox * pParam->B4SOIweff * here->B4SOInf * Leff * T1; /* v4.0 */ if (here->B4SOInf != 1) { here->B4SOIcdrain *= here->B4SOInf; here->B4SOIcd *= 
here->B4SOInf; here->B4SOIcb *= here->B4SOInf; /* Fix NF problem with tnoimod=1 - LFW */ here->B4SOIidovVds *= here->B4SOInf; here->B4SOIgds *= here->B4SOInf; here->B4SOIgm *= here->B4SOInf; here->B4SOIgmbs *= here->B4SOInf; here->B4SOIgme *= here->B4SOInf; /* Xue fix 10/29/2009 */ /* here->B4SOIgmT *= here->B4SOInf; *added in line 5424 */ here->B4SOIcbody *= here->B4SOInf; here->B4SOIcgate *= here->B4SOInf; here->B4SOIIgcs *= here->B4SOInf; here->B4SOIgIgcsg *= here->B4SOInf; here->B4SOIgIgcsd *= here->B4SOInf; here->B4SOIgIgcsb *= here->B4SOInf; here->B4SOIgIgcse *= here->B4SOInf; /* LFW_FD new line */ here->B4SOIIgcd *= here->B4SOInf; here->B4SOIgIgcdg *= here->B4SOInf; here->B4SOIgIgcdd *= here->B4SOInf; here->B4SOIgIgcdb *= here->B4SOInf; here->B4SOIgIgcde *= here->B4SOInf; /* LFW_FD new line */ here->B4SOIIgs *= here->B4SOInf; here->B4SOIgIgsg *= here->B4SOInf; here->B4SOIgIgss *= here->B4SOInf; here->B4SOIIgd *= here->B4SOInf; here->B4SOIgIgdg *= here->B4SOInf; here->B4SOIgIgdd *= here->B4SOInf; here->B4SOIig *= here->B4SOInf; here->B4SOIgigg *= here->B4SOInf; here->B4SOIgigd *= here->B4SOInf; here->B4SOIgigb *= here->B4SOInf; here->B4SOIgige *= here->B4SOInf; here->B4SOIgigT *= here->B4SOInf; here->B4SOIcjs *= here->B4SOInf; here->B4SOIcjd *= here->B4SOInf; here->B4SOIibs *= here->B4SOInf; here->B4SOIibd *= here->B4SOInf; Idbdp *= here->B4SOInf; /*v4.2 bug fix Idbdp needs update as Ibd for nf!=1*/ Isbsp *= here->B4SOInf; /*v4.2 bug fix Isbsp needs update as Ibd for nf!=1*/ here->B4SOIgbbs *= here->B4SOInf; here->B4SOIgbgs *= here->B4SOInf; here->B4SOIgbds *= here->B4SOInf; here->B4SOIgbes *= here->B4SOInf; here->B4SOIgbps *= here->B4SOInf; here->B4SOIgbT *= here->B4SOInf; here->B4SOIigidl *= here->B4SOInf; here->B4SOIigisl *= here->B4SOInf; /* bugfix_snps NF*/ here->B4SOIgjdb *= here->B4SOInf; here->B4SOIgjdd *= here->B4SOInf; here->B4SOIgjdg *= here->B4SOInf; here->B4SOIgjde *= here->B4SOInf; here->B4SOIgjdT *= here->B4SOInf; here->B4SOIgjsb *= 
here->B4SOInf; here->B4SOIgjsd *= here->B4SOInf; here->B4SOIgjsg *= here->B4SOInf; here->B4SOIgjsT *= here->B4SOInf; here->B4SOIcth *= here->B4SOInf; here->B4SOIgmT *= here->B4SOInf; here->B4SOIgtempg *= here->B4SOInf; here->B4SOIgtempb *= here->B4SOInf; here->B4SOIgtempe *= here->B4SOInf; here->B4SOIgtempT *= here->B4SOInf; here->B4SOIgtempd *= here->B4SOInf; here->B4SOIiii *= here->B4SOInf; /* bugfix NF ends */ } here->B4SOIgigs = -(here->B4SOIgigg + here->B4SOIgigd + here->B4SOIgigb + here->B4SOIgige); /* LFW_FD fix 2 derivatives */ here->B4SOIgIgcss = -(here->B4SOIgIgcsg + here->B4SOIgIgcsd + here->B4SOIgIgcsb + here->B4SOIgIgcse); here->B4SOIgIgcds = -(here->B4SOIgIgcdg + here->B4SOIgIgcdd + here->B4SOIgIgcdb + here->B4SOIgIgcde); /* Begin CV (charge) model */ /* LFW_FD 9 new lines - flexilint */ Cbb = Cbd = Cbg = 0.0; Qsub0 = Qac0 = 0.0; qjs = qjd = 0.0; CboxWL = 0.0; Qe1 = dQe1_dVb = dQe1_dVe = dQe1_dT = 0; Vfbeff2=dVfbeff2_dVd=dVfbeff2_dVrg=dVfbeff2_dVg=dVfbeff2_dVb=dVfbeff2_dVe=dVfbeff2_dT=0.0; VdseffCV2 = dVdseffCV2_dVg = dVdseffCV2_dVd = dVdseffCV2_dVb = dVdseffCV2_dVe = 0.0; Vgsteff2 = 0.0; dVgsteff2_dVd=dVgsteff2_dVg=dVgsteff2_dVb=dVgsteff2_dVe=dVgsteff2_dT=0.0; if ((model->B4SOIxpart < 0) || (!ChargeComputationNeeded)) { qgate = qdrn = qsrc = qbody = qsub = 0.0; /* v2.2.3 bug fix */ Qsub0=Qac0=Cbb=Cbg=Cbd=0; /* Bugfix #19 Jul09*/ here->B4SOIcggb = here->B4SOIcgsb = here->B4SOIcgdb = 0.0; here->B4SOIcdgb = here->B4SOIcdsb = here->B4SOIcddb = 0.0; here->B4SOIcbgb = here->B4SOIcbsb = here->B4SOIcbdb = 0.0; goto finished; } else { qgate = qdrn = qsrc = qbody = qsub = 0.0; /* flexilint */ CoxWL = model->B4SOIcox * (pParam->B4SOIweffCV / here->B4SOInseg * here->B4SOInf /* v4.0 */ * pParam->B4SOIleffCV + here->B4SOIagbcp); CoxWLb = model->B4SOIfbody * model->B4SOIcox * (pParam->B4SOIweffCV / here->B4SOInseg * here->B4SOInf /* v4.0 */ * pParam->B4SOIleffCVb + here->B4SOIagbcp); /* v4.1 for improved BT charge model */ CoxWL2 = model->B4SOIcox * 
here->B4SOIagbcp2; CoxWLb2 = model->B4SOIfbody * model->B4SOIcox * here->B4SOIagbcp2; /* end v4.1 */ /* v3.2 Seperate VgsteffCV with noff */ noff = n * pParam->B4SOInoff; dnoff_dVg = pParam->B4SOInoff * dn_dVg; /* LFW_FD new line */ dnoff_dVd = pParam->B4SOInoff * dn_dVd; dnoff_dVb = pParam->B4SOInoff * dn_dVb; dnoff_dVe = pParam->B4SOInoff * dn_dVe; /* LFW_FD new line */ dnoff_dT = pParam->B4SOInoff * dn_dT; /* new line Wagner */ if (model->B4SOIvgstcvMod == 0) { if ((VgstNVt > -EXPL_THRESHOLD) && (VgstNVt < EXPL_THRESHOLD)) { TL1 = ExpVgst; /* LFW_FD new line */ ExpVgst *= ExpVgst; ExpVgst *= exp( -(pParam->B4SOIdelvt / (noff * Vtm))); /* LFW_FD 4 new derivatives */ dExpVgst_dVg = 2.0 * TL1 * dExpVgst_dVg * exp( -pParam->B4SOIdelvt / (noff * Vtm)) + ExpVgst * pParam->B4SOIdelvt * dnoff_dVg / (noff * noff * Vtm); dExpVgst_dVd = 2.0 * TL1 * dExpVgst_dVd * exp( -pParam->B4SOIdelvt / (noff * Vtm)) + ExpVgst * pParam->B4SOIdelvt * dnoff_dVd / (noff * noff * Vtm); dExpVgst_dVb = 2.0 * TL1 * dExpVgst_dVb * exp( -pParam->B4SOIdelvt / (noff * Vtm)) + ExpVgst * pParam->B4SOIdelvt * dnoff_dVb / (noff * noff * Vtm); dExpVgst_dVe = 2.0 * TL1 * dExpVgst_dVe * exp( -pParam->B4SOIdelvt / (noff * Vtm)) + ExpVgst * pParam->B4SOIdelvt * dnoff_dVe / (noff * noff * Vtm); Vgsteff = noff * Vtm * log(1.0 + ExpVgst); /* LFW_FD 4 fix derivatives */ dVgsteff_dVg = Vgsteff * dnoff_dVg / noff + noff * Vtm * dExpVgst_dVg / (1.0 + ExpVgst); dVgsteff_dVd = Vgsteff * dnoff_dVd / noff + noff * Vtm * dExpVgst_dVd / (1.0 + ExpVgst); dVgsteff_dVb = Vgsteff * dnoff_dVb / noff + noff * Vtm * dExpVgst_dVb / (1.0 + ExpVgst); dVgsteff_dVe = Vgsteff * dnoff_dVe / noff + noff * Vtm * dExpVgst_dVe / (1.0 + ExpVgst); T0 = ExpVgst / (1.0 + ExpVgst); T2 = 2.0 * pParam->B4SOImstar * pParam->B4SOInoff; /* LFW_FD new line */ T1 = -T0 * (T2*dVth_dVb + (T2*Vgst-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb; /* LFW_FD fix line */ /* LFW_FD fix _dT derivatives */ if (selfheat) { dExpVgst_dT = 
2.0 * TL1 * dExpVgst_dT * exp( -pParam->B4SOIdelvt / (noff * Vtm)) + ExpVgst * pParam->B4SOIdelvt * (dVtm_dT / Vtm + dnoff_dT / noff) / (noff * Vtm); dVgsteff_dT = Vgsteff * (dnoff_dT / noff + dVtm_dT / Vtm) + noff * Vtm * dExpVgst_dT / (1.0 + ExpVgst); } else dVgsteff_dT = 0.0; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { /* ExpVgst2 = ExpVgst * exp(-1.12 / noff / Vtm); */ ExpVgst2 = ExpVgst * exp(-eggbcp2 / noff / Vtm); /* bugfix 4.3.1 -Tanvir */ /* LFW_FD add 4 derivatives */ dExpVgst2_dVg = dExpVgst_dVg * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVg / (noff * noff * Vtm); dExpVgst2_dVd = dExpVgst_dVd * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVd / (noff * noff * Vtm); dExpVgst2_dVb = dExpVgst_dVb * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVb / (noff * noff * Vtm); dExpVgst2_dVe = dExpVgst_dVe * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVe / (noff * noff * Vtm); Vgsteff2 = noff * Vtm * log(1.0 + ExpVgst2); /* LFW_FD fix 4 derivatives */ dVgsteff2_dVg = Vgsteff2 * dnoff_dVg / noff + noff * Vtm * dExpVgst2_dVg / (1.0 + ExpVgst2); dVgsteff2_dVd = Vgsteff2 * dnoff_dVd / noff + noff * Vtm * dExpVgst2_dVd / (1.0 + ExpVgst2); dVgsteff2_dVb = Vgsteff2 * dnoff_dVb / noff + noff * Vtm * dExpVgst2_dVb / (1.0 + ExpVgst2); dVgsteff2_dVe = Vgsteff2 * dnoff_dVe / noff + noff * Vtm * dExpVgst2_dVe / (1.0 + ExpVgst2); T02 = ExpVgst2 / (1.0 + ExpVgst2); /* T12 = -T02 * (dVth_dVb + (Vgst-1.12-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff2 / noff * dnoff_dVb; */ T12 = -T02 * (dVth_dVb + (Vgst-eggbcp2-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff2 / noff * dnoff_dVb; /* bugfix 4.3.1 -Tanvir */ if (selfheat) /*fix below expression Wagner */ /*dVgsteff2_dT = -T02 * (dVth_dT+dVth_dVb*dVbseff_dT */ /* dVgsteff2_dT = -T02 * (-dVgst_dT + (Vgst - 1.12 - pParam->B4SOIdelvt) / Temp) + Vgsteff2 / Temp; */ /* bugfix 4.3.1 -Tanvir */ dVgsteff2_dT = -T02 * (-dVgst_dT + (Vgst - eggbcp2 - pParam->B4SOIdelvt) / Temp) + Vgsteff2 / 
Temp; else dVgsteff2_dT = 0.0; } } } else if (model->B4SOIvgstcvMod == 1) { ExpVgst = exp(VgstNVt/(pParam->B4SOImstar * pParam->B4SOInoff)); ExpVgst *= exp( -(pParam->B4SOIdelvt / (noff * Vtm))); /* LFW_FD add 4 derivatives */ dExpVgst_dVg = ExpVgst * (dVgstNVt_dVg/(pParam->B4SOImstar * pParam->B4SOInoff) + pParam->B4SOIdelvt * dnoff_dVg / (noff * noff * Vtm)); dExpVgst_dVd = ExpVgst * (dVgstNVt_dVd/(pParam->B4SOImstar * pParam->B4SOInoff) + pParam->B4SOIdelvt * dnoff_dVd / (noff * noff * Vtm)); dExpVgst_dVb = ExpVgst * (dVgstNVt_dVb/(pParam->B4SOImstar * pParam->B4SOInoff) + pParam->B4SOIdelvt * dnoff_dVb / (noff * noff * Vtm)); dExpVgst_dVe = ExpVgst * (dVgstNVt_dVe/(pParam->B4SOImstar * pParam->B4SOInoff) + pParam->B4SOIdelvt * dnoff_dVe / (noff * noff * Vtm)); Vgsteff = noff * Vtm * log(1.0 + ExpVgst); /* LFW_FD fix 4 derivatives */ dVgsteff_dVg = Vgsteff * dnoff_dVg / noff + noff * Vtm * dExpVgst_dVg / (1.0 + ExpVgst); dVgsteff_dVd = Vgsteff * dnoff_dVd / noff + noff * Vtm * dExpVgst_dVd / (1.0 + ExpVgst); dVgsteff_dVb = Vgsteff * dnoff_dVb / noff + noff * Vtm * dExpVgst_dVb / (1.0 + ExpVgst); dVgsteff_dVe = Vgsteff * dnoff_dVe / noff + noff * Vtm * dExpVgst_dVe / (1.0 + ExpVgst); T0 = ExpVgst / (1.0 + ExpVgst); T1 = -T0 * (dVth_dVb + (Vgst-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb; if (selfheat) /*fix below expression Wagner */ /*dVgsteff_dT = -T0 * (dVth_dT+dVth_dVb*dVbseff_dT */ dVgsteff_dT = -T0 * (-dVgst_dT + (Vgst - pParam->B4SOIdelvt) / Temp) + Vgsteff / Temp; else dVgsteff_dT = 0.0; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { /* ExpVgst2 = ExpVgst * exp(-1.12 / noff / Vtm); */ ExpVgst2 = ExpVgst * exp(-eggbcp2 / noff / Vtm); /* bugfix 4.3.1 -Tanvir */ /* LFW_FD add 4 derivatives */ dExpVgst2_dVg = dExpVgst_dVg * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVg / (noff * noff * Vtm); dExpVgst2_dVd = dExpVgst_dVd * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVd / (noff * noff * Vtm); dExpVgst2_dVb = 
dExpVgst_dVb * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVb / (noff * noff * Vtm); dExpVgst2_dVe = dExpVgst_dVe * exp(-eggbcp2 / noff / Vtm) + ExpVgst2 * eggbcp2 * dnoff_dVe / (noff * noff * Vtm); Vgsteff2 = noff * Vtm * log(1.0 + ExpVgst2); /* LFW_FD fix 4 derivatives */ dVgsteff2_dVg = Vgsteff2 * dnoff_dVg / noff + noff * Vtm * dExpVgst2_dVg / (1.0 + ExpVgst2); dVgsteff2_dVd = Vgsteff2 * dnoff_dVd / noff + noff * Vtm * dExpVgst2_dVd / (1.0 + ExpVgst2); dVgsteff2_dVb = Vgsteff2 * dnoff_dVb / noff + noff * Vtm * dExpVgst2_dVb / (1.0 + ExpVgst2); dVgsteff2_dVe = Vgsteff2 * dnoff_dVe / noff + noff * Vtm * dExpVgst2_dVe / (1.0 + ExpVgst2); T02 = ExpVgst2 / (1.0 + ExpVgst2); /* T12 = -T02 * (dVth_dVb + (Vgst-1.12-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff2 / noff * dnoff_dVb; */ T12 = -T02 * (dVth_dVb + (Vgst-eggbcp2-pParam->B4SOIdelvt) / noff * dnoff_dVb) + Vgsteff2 / noff * dnoff_dVb; /* bugfix 4.3.1 -Tanvir */ dVgsteff2_dVb = T12 * dVbseff_dVb; dVgsteff2_dVe = T12 * dVbseff_dVe; if (selfheat) /*fix below expression Wagner */ /*dVgsteff2_dT = -T02 * (dVth_dT+dVth_dVb*dVbseff_dT */ /* dVgsteff2_dT = -T02 * (-dVgst_dT + (Vgst - 1.12 - pParam->B4SOIdelvt) / Temp) + Vgsteff2 / Temp; */ dVgsteff2_dT = -T02 * (-dVgst_dT + (Vgst - eggbcp2 - pParam->B4SOIdelvt) / Temp) + Vgsteff2 / Temp; /* bugfix 4.3.1 -Tanvir */ else dVgsteff2_dT = 0.0; } } else { T10 = noff * Vtm; VgstNVt = pParam->B4SOImstarcv * (Vgst - pParam->B4SOIdelvt) / T10; /* LFW_FD add 4 derivatives */ dVgstNVt_dVg = (pParam->B4SOImstarcv * dVgst_dVg - VgstNVt * dnoff_dVg * Vtm) / T10; dVgstNVt_dVd = (pParam->B4SOImstarcv * dVgst_dVd - VgstNVt * dnoff_dVd * Vtm) / T10; dVgstNVt_dVb = (pParam->B4SOImstarcv * dVgst_dVb - VgstNVt * dnoff_dVb * Vtm) / T10; dVgstNVt_dVe = (pParam->B4SOImstarcv * dVgst_dVe - VgstNVt * dnoff_dVe * Vtm) / T10; ExpArg = (pParam->B4SOIvoffcv - (1- pParam->B4SOImstarcv) * (Vgst - pParam->B4SOIdelvt))/ T10; /* LFW_FD add 4 derivatives */ dExpArg_dVg = (-(1- 
pParam->B4SOImstarcv) * dVgst_dVg - ExpArg * dnoff_dVg * Vtm) / T10; dExpArg_dVd = (-(1- pParam->B4SOImstarcv) * dVgst_dVd - ExpArg * dnoff_dVd * Vtm) / T10; dExpArg_dVb = (-(1- pParam->B4SOImstarcv) * dVgst_dVb - ExpArg * dnoff_dVb * Vtm) / T10; dExpArg_dVe = (-(1- pParam->B4SOImstarcv) * dVgst_dVe - ExpArg * dnoff_dVe * Vtm) / T10; /* 11 lines new Wagner */ if (selfheat) { dT10_dT = noff * dVtm_dT + dnoff_dT * Vtm; /* fix below expression Wagner */ /*dVgstNVt_dT = -(pParam->B4SOImstarcv*dVth_dT + VgstNVt*dT10_dT)/T10; */ dVgstNVt_dT = -(-pParam->B4SOImstarcv*dVgst_dT + VgstNVt*dT10_dT)/T10; /* fix below expression Wagner */ dExpArg_dT = -(1- pParam->B4SOImstarcv)*dVgst_dT/T10 -ExpArg*dT10_dT/T10; } else { dT10_dT = 0.0; dVgstNVt_dT = 0.0; dExpArg_dT = 0.0; } /* MCJ: Very small Vgst */ if (VgstNVt > EXPL_THRESHOLD) { Vgsteff = Vgst - pParam->B4SOIdelvt; /* T0 is dVgsteff_dVbseff */ T0 = -dVth_dVb; /* LFW_FD fix 4 derivatives */ dVgsteff_dVg = dVgst_dVg; dVgsteff_dVd = dVgst_dVd; dVgsteff_dVb = dVgst_dVb; dVgsteff_dVe = dVgst_dVe; if (selfheat) /*fix below expression Wagner */ /*dVgsteff_dT = -dVth_dT + T0 * dVbseff_dT; */ dVgsteff_dT = dVgst_dT; else dVgsteff_dT = 0.0; } else if (ExpArg > EXPL_THRESHOLD) { T0 = (Vgst - pParam->B4SOIdelvt - pParam->B4SOIvoffcv) / (noff * Vtm); ExpVgst = exp(T0); /* LFW_FD add 4 derivatives */ dExpVgst_dVg = (dVgst_dVg - T0 * dnoff_dVg * Vtm) /(noff * Vtm); dExpVgst_dVd = (dVgst_dVd - T0 * dnoff_dVd * Vtm) /(noff * Vtm); dExpVgst_dVb = (dVgst_dVb - T0 * dnoff_dVb * Vtm) /(noff * Vtm); dExpVgst_dVe = (dVgst_dVe - T0 * dnoff_dVe * Vtm) /(noff * Vtm); /*Vgsteff = Vtm * pParam->B4SOIcdep0 / model->B4SOIcox * ExpVgst;*/ /*v4.2 bug fix*/ Vgsteff = Vtm * cdep0 / model->B4SOIcox * ExpVgst; /* v4.2 bug fix */ T3 = Vgsteff / (noff * Vtm) ; /* T1 is dVgsteff_dVbseff */ /* T1 = -T3 * (dVth_dVb + T0 * Vtm * dnoff_dVb); */ T1 = -T3 * ( T0 * Vtm * dnoff_dVb); /* LFW_FD fixed line */ /* LFW_FD fix 4 derivatives */ dVgsteff_dVg = Vtm * cdep0 / 
model->B4SOIcox * dExpVgst_dVg; dVgsteff_dVd = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVd; dVgsteff_dVb = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVb; dVgsteff_dVe = Vtm * cdep0 / model->B4SOIcox * dExpVgst_dVe; if (selfheat) /*fix below expression Wagner */ /*dVgsteff_dT = -T3 * (dVth_dT + T0 * dVtm_dT * noff) + Vgsteff / Temp+ T1 * dVbseff_dT;*/ dVgsteff_dT = -T3 * (-dVgst_dT + T0 * dVtm_dT * noff + Vtm * dnoff_dT) + Vgsteff / Temp; else dVgsteff_dT = 0.0; } else { ExpVgst = exp(VgstNVt); /* LFW_FD add 4 derivatives */ dExpVgst_dVg = ExpVgst * dVgstNVt_dVg; dExpVgst_dVd = ExpVgst * dVgstNVt_dVd; dExpVgst_dVb = ExpVgst * dVgstNVt_dVb; dExpVgst_dVe = ExpVgst * dVgstNVt_dVe; T1 = T10 * log(1.0 + ExpVgst); /* LFW_FD fix 4 derivatives */ dT1_dVg = T10 * dExpVgst_dVg / (1.0 + ExpVgst) + T1 * dnoff_dVg / noff; dT1_dVd = T10 * dExpVgst_dVd / (1.0 + ExpVgst) + T1 * dnoff_dVd / noff; dT1_dVb = T10 * dExpVgst_dVb / (1.0 + ExpVgst) + T1 * dnoff_dVb / noff; dT1_dVe = T10 * dExpVgst_dVe / (1.0 + ExpVgst) + T1 * dnoff_dVe / noff; /*fix below expression Wagner */ /*T3 = (1.0 / Temp); */ T3 = (1.0 / Temp + dnoff_dT / noff); if (selfheat) /*fix below expression Wagner */ /*dT1_dT = -dT1_dVg * (dVth_dT + (Vgst-pParam->B4SOIdelvt) * T3) + T1 * T3;*/ dT1_dT = dT10_dT * log(1.0 + ExpVgst) + T10 * ExpVgst / (1.0 + ExpVgst) * dVgstNVt_dT; else dT1_dT = 0.0; /* dT2_dVg = -model->B4SOIcox / (Vtm * pParam->B4SOIcdep0) */ /* * exp(ExpArg) * (1 - pParam->B4SOImstarcv); v4.2 bug fix */ dT2_dVg = -model->B4SOIcox / (Vtm * cdep0) * exp(ExpArg) * (1 - pParam->B4SOImstarcv); /* v4.2 bug fix */ T2 = pParam->B4SOImstarcv - T10 * dT2_dVg / (1.0 - pParam->B4SOImstarcv); /* LFW_FD 5 new lines */ TL1 = dT2_dVg; dTL1_dVg = TL1 * dExpArg_dVg; dTL1_dVd = TL1 * dExpArg_dVd; dTL1_dVb = TL1 * dExpArg_dVb; dTL1_dVe = TL1 * dExpArg_dVe; /* LFW_FD fix/add 5 derivatives */ dT2_dVg = -(dnoff_dVg * Vtm * TL1 + T10 * dTL1_dVg) / (1.0 - pParam->B4SOImstarcv); dT2_dVd = -(dnoff_dVd * Vtm * TL1 + T10 * dTL1_dVd) / 
(1.0 - pParam->B4SOImstarcv); dT2_dVb = -(dnoff_dVb * Vtm * TL1 + T10 * dTL1_dVb) / (1.0 - pParam->B4SOImstarcv); dT2_dVe = -(dnoff_dVe * Vtm * TL1 + T10 * dTL1_dVe) / (1.0 - pParam->B4SOImstarcv); if (selfheat) dT2_dT = -(dT10_dT*TL1 +T10*TL1*(-dVtm_dT/Vtm-dcdep0_dT/cdep0+dExpArg_dT) )/(1.0 - pParam->B4SOImstarcv); else dT2_dT = 0.0; Vgsteff = T1 / T2; T3 = T2 * T2; /* T4 is dVgsteff_dVbseff */ T4 = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; /* LFW_FD fix 4 derivatives */ dVgsteff_dVg = (T2 * dT1_dVg - T1 * dT2_dVg) / T3; dVgsteff_dVd = (T2 * dT1_dVd - T1 * dT2_dVd) / T3; dVgsteff_dVb = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; dVgsteff_dVe = (T2 * dT1_dVe - T1 * dT2_dVe) / T3; if (selfheat) /*fix below expression Wagner */ /*dVgsteff_dT = (T2 * dT1_dT - T1 * dT2_dT) / T3+ T4 * dVbseff_dT; */ dVgsteff_dT = (T2 * dT1_dT - T1 * dT2_dT) / T3; else dVgsteff_dT = 0.0; } if (here->B4SOIagbcp2 > 0) { /* VgstNVt2 = pParam->B4SOImstarcv * (Vgst - pParam->B4SOIdelvt - 1.12) / T10; */ VgstNVt2 = pParam->B4SOImstarcv * (Vgst - pParam->B4SOIdelvt - eggbcp2) / T10; /* bugfix 4.3.1 -Tanvir */ /* LFW_FD add 4 derivatives */ dVgstNVt2_dVg = (pParam->B4SOImstarcv * dVgst_dVg - VgstNVt2 * dnoff_dVg * Vtm) / T10; dVgstNVt2_dVd = (pParam->B4SOImstarcv * dVgst_dVd - VgstNVt2 * dnoff_dVd * Vtm) / T10; dVgstNVt2_dVb = (pParam->B4SOImstarcv * dVgst_dVb - VgstNVt2 * dnoff_dVb * Vtm) / T10; dVgstNVt2_dVe = (pParam->B4SOImstarcv * dVgst_dVe - VgstNVt2 * dnoff_dVe * Vtm) / T10; /* ExpArg2 = (pParam->B4SOIvoffcv - (1- pParam->B4SOImstarcv) * (Vgst - pParam->B4SOIdelvt - 1.12))/ T10; */ ExpArg2 = (pParam->B4SOIvoffcv - (1- pParam->B4SOImstarcv) * (Vgst - pParam->B4SOIdelvt - eggbcp2))/ T10; /* bugfix 4.3.1 -Tanvir */ /* LFW_FD add 4 derivatives */ dExpArg2_dVg = (-(1- pParam->B4SOImstarcv) * dVgst_dVg - ExpArg2 * dnoff_dVg * Vtm) / T10; dExpArg2_dVd = (-(1- pParam->B4SOImstarcv) * dVgst_dVd - ExpArg2 * dnoff_dVd * Vtm) / T10; dExpArg2_dVb = (-(1- pParam->B4SOImstarcv) * dVgst_dVb - ExpArg2 * dnoff_dVb * 
Vtm) / T10; dExpArg2_dVe = (-(1- pParam->B4SOImstarcv) * dVgst_dVe - ExpArg2 * dnoff_dVe * Vtm) / T10; /* 11 new lines Wagner */ if (selfheat) { /*fix below expression Wagner */ /*dVgstNVt2_dT = -(pParam->B4SOImstarcv*dVth_dT + VgstNVt2*dT10_dT)/T10;*/ dVgstNVt2_dT = -(-pParam->B4SOImstarcv*dVgst_dT + VgstNVt2*dT10_dT)/T10; /*fix 1st line of below expression Wagner */ /*dExpArg2_dT = (1- pParam->B4SOImstarcv)*dVth_dT/T10 */ dExpArg2_dT = -(1- pParam->B4SOImstarcv)*dVgst_dT/T10 -ExpArg2*dT10_dT/T10; } else { dT10_dT = 0.0; dVgstNVt_dT = 0.0; dExpArg_dT = 0.0; dExpArg2_dT = 0.0; } /* MCJ: Very small Vgst */ if (VgstNVt2 > EXPL_THRESHOLD) { /* Vgsteff2 = Vgst - pParam->B4SOIdelvt - 1.12; */ Vgsteff2 = Vgst - pParam->B4SOIdelvt - eggbcp2; /* bugfix 4.3.1 -Tanvir */ T0 = -dVth_dVb; /* LFW_FD fix 4 derivatives */ dVgsteff2_dVg = dVgst_dVg; dVgsteff2_dVd = dVgst_dVd; dVgsteff2_dVb = dVgst_dVb; dVgsteff2_dVe = dVgst_dVe; if (selfheat) /*fix below expression Wagner */ /*dVgsteff2_dT = -dVth_dT + T0 * dVbseff_dT;*/ dVgsteff2_dT = dVgst_dT; else dVgsteff2_dT = 0.0; } else if (ExpArg2 > EXPL_THRESHOLD) { /* T0 = (Vgst - pParam->B4SOIdelvt - pParam->B4SOIvoffcv - 1.12) / (noff * Vtm); ExpVgst2 = exp(T0); */ T0 = (Vgst - pParam->B4SOIdelvt - pParam->B4SOIvoffcv - eggbcp2) / (noff * Vtm); ExpVgst2 = exp(T0); /* bugfix 4.3.1 -Tanvir */ /*Vgsteff2 = Vtm * pParam->B4SOIcdep0 / model->B4SOIcox * ExpVgst*/ Vgsteff2 = Vtm * cdep0 / model->B4SOIcox * ExpVgst2; /*v4.2 bug fix */ T3 = Vgsteff2 / (noff * Vtm) ; /* T1 is dVgsteff2_dVbseff */ T1 = -T3 * (dVth_dVb + T0 * Vtm * dnoff_dVb); /* LFW_FD fix 4 derivatives */ dVgsteff2_dVg = Vgsteff2 * (dVgst_dVg / Vtm - T0 * dnoff_dVg) / noff; dVgsteff2_dVd = Vgsteff2 * (dVgst_dVd / Vtm - T0 * dnoff_dVd) / noff; dVgsteff2_dVb = Vgsteff2 * (dVgst_dVb / Vtm - T0 * dnoff_dVb) / noff; dVgsteff2_dVe = Vgsteff2 * (dVgst_dVe / Vtm - T0 * dnoff_dVe) / noff; if (selfheat) /* fix 1st line in below expression Wagner */ /*dVgsteff2_dT = -T3 * (dVth_dT + T0 * 
dVtm_dT * noff) */ dVgsteff2_dT = -T3 * (-dVgst_dT + T0 * dVtm_dT * noff) + Vgsteff2 / Temp+ T1 * dVbseff_dT; else dVgsteff2_dT = 0.0; } else { ExpVgst2 = exp(VgstNVt2); T1 = T10 * log(1.0 + ExpVgst2); /* LFW_FD fix 4 derivatives */ dT1_dVg = dnoff_dVg * T1 / noff + T10 * ExpVgst2 * dVgstNVt2_dVg / (1.0 + ExpVgst2); dT1_dVd = dnoff_dVg * T1 / noff + T10 * ExpVgst2 * dVgstNVt2_dVd / (1.0 + ExpVgst2); dT1_dVb = dnoff_dVg * T1 / noff + T10 * ExpVgst2 * dVgstNVt2_dVb / (1.0 + ExpVgst2); dT1_dVe = dnoff_dVg * T1 / noff + T10 * ExpVgst2 * dVgstNVt2_dVe / (1.0 + ExpVgst2); /*fix below expression Wagner */ /*T3 = (1.0 / Temp); */ T3 = (1.0 / Temp + dnoff_dT / noff); if (selfheat) /*fix below expression */ /*dT1_dT = -dT1_dVg * (dVth_dT + (Vgst - pParam->B4SOIdelvt - 1.12) * T3) + T1 * T3;*/ /* dT1_dT = -dT1_dVg * (-dVgst_dT + (Vgst-pParam->B4SOIdelvt-1.12) * T3) + T1 * T3; */ dT1_dT = -dT1_dVg * (-dVgst_dT + (Vgst-pParam->B4SOIdelvt-eggbcp2) * T3) + T1 * T3; /* bugfix 4.3.1 -Tanvir */ else dT1_dT = 0.0; /* dT2_dVg = -model->B4SOIcox / (Vtm * pParam->B4SOIcdep0) * exp(ExpArg2) * (1 - pParam->B4SOImstarcv);*/ dT2_dVg = -model->B4SOIcox / (Vtm * cdep0) * exp(ExpArg2) * (1 - pParam->B4SOImstarcv); /*v4.2 bug fix */ T2 = pParam->B4SOImstarcv - T10 * dT2_dVg / (1.0 - pParam->B4SOImstarcv); /* LFW_FD next 5 lines new */ TL1 = dT2_dVg; dTL1_dVg = TL1 * dExpArg2_dVg; dTL1_dVd = TL1 * dExpArg2_dVd; dTL1_dVb = TL1 * dExpArg2_dVb; dTL1_dVe = TL1 * dExpArg2_dVe; /* LFW_FD fix next 5 derivatives */ dT2_dVg = -(dnoff_dVg * Vtm * TL1 + T10 * dTL1_dVg) / (1.0 - pParam->B4SOImstarcv); dT2_dVd = -(dnoff_dVg * Vtm * TL1 + T10 * dTL1_dVd) / (1.0 - pParam->B4SOImstarcv); dT2_dVb = -(dnoff_dVg * Vtm * TL1 + T10 * dTL1_dVb) / (1.0 - pParam->B4SOImstarcv); dT2_dVe = -(dnoff_dVg * Vtm * TL1 + T10 * dTL1_dVe) / (1.0 - pParam->B4SOImstarcv); if (selfheat) dT2_dT = -(dT10_dT*TL1 +T10*TL1*(-dVtm_dT/Vtm-dcdep0_dT/cdep0+dExpArg2_dT) )/(1.0 - pParam->B4SOImstarcv); else dT2_dT = 0.0; Vgsteff2 = T1 / T2; 
T3 = T2 * T2; /* T4 is dVgsteff2_dVbseff */ T4 = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; /* LFW_FD fix next 4 derivatives */ dVgsteff2_dVg = (T2 * dT1_dVg - T1 * dT2_dVg) / T3; dVgsteff2_dVd = (T2 * dT1_dVd - T1 * dT2_dVd) / T3; dVgsteff2_dVb = (T2 * dT1_dVb - T1 * dT2_dVb) / T3; dVgsteff2_dVe = (T2 * dT1_dVe - T1 * dT2_dVe) / T3; if (selfheat) /*fix below expression Wagner */ /*dVgsteff2_dT = (T2 * dT1_dT - T1 * dT2_dT) / T3+ T4 * dVbseff_dT; */ dVgsteff2_dT = (T2 * dT1_dT - T1 * dT2_dT) / T3; else dVgsteff2_dT = 0.0; } } } /* v3.2 */ /* v3.2 */ /* LFW_FD flexilint initializations next 9 lines */ Qsub02 = dQsub02_dVrg = dQsub02_dVg = dQsub02_dVd = dQsub02_dVb = dQsub02_dVe = dQsub02_dT = 0.0; Qac02 = dQac02_dVrg = dQac02_dVg = dQac02_dVd = dQac02_dVb = dQac02_dVe = dQac02_dT = 0.0; dqsrc_dT = 0.0; dVdseffCV2_dT = 0; T02 = dT02_dVg = dT02_dVd = dT02_dVb = dT02_dVe = 0.0; T12 = dT12_dVg = dT12_dVd = dT12_dVb = dT12_dVe = 0.0; T22 = dT22_dVg = dT22_dVd = dT22_dVb = dT22_dVe = 0.0; if (model->B4SOIcapMod == 2) { /* v3.1 */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { /* LFW_FD flexilint initializations next 4 lines */ Qac0 = dQac0_dVrg = dQac0_dVg = dQac0_dVd = dQac0_dVb = dQac0_dVe = dQac0_dT = 0.0; dQac02_dVrg = dQac02_dVg = dQac02_dVd = dQac02_dVb = dQac02_dVe = dQac02_dT = 0.0; Qsub0 = dQsub0_dVrg = dQsub0_dVg = dQsub0_dVd = dQsub0_dVb = dQsub0_dVe = dQsub0_dT = 0.0; dQsub02_dVrg = dQsub02_dVg = dQsub02_dVd = dQsub02_dVb = dQsub02_dVe = dQsub02_dT = 0.0; } else /* soiMod = 0 or 1 */ { Vfb = Vth - phi - pParam->B4SOIk1eff * sqrtPhis + pParam->B4SOIdelvt; dVfb_dVb = dVth_dVb - pParam->B4SOIk1eff * dsqrtPhis_dVb; /* LFW_FD fix/add next 3 derivatives */ dVfb_dVd = dVth_dVd - pParam->B4SOIk1eff * dsqrtPhis_dVd; dVfb_dVg = dVth_dVg - pParam->B4SOIk1eff * dsqrtPhis_dVg; dVfb_dVe = dVth_dVe - pParam->B4SOIk1eff * dsqrtPhis_dVe; /*fix below expression Wagner */ /*dVfb_dT = dVth_dT; */ dVfb_dT = dVth_dT - dphi_dT - pParam->B4SOIk1eff*dsqrtPhis_dT; V3 = Vfb - Vgs_eff 
+ Vbseff - DELTA_3_SOI; if (Vfb <= 0.0) { T0 = sqrt(V3 * V3 - 4.0 * DELTA_3_SOI * Vfb); T2 = -DELTA_3_SOI / T0; } else { T0 = sqrt(V3 * V3 + 4.0 * DELTA_3_SOI * Vfb); T2 = DELTA_3_SOI / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = Vfb - 0.5 * (V3 + T0); /* LFW_FD fix/add next 4 derivatives */ dVfbeff_dVd = (1.0 - T1 - T2) * dVfb_dVd - T1 * dVbseff_dVd; dVfbeff_dVb = (1.0 - T1 - T2) * dVfb_dVb - T1 * dVbseff_dVb; dVfbeff_dVg = (1.0 - T1 - T2) * dVfb_dVg - T1 * (dVbseff_dVg - dVgs_eff_dVg); dVfbeff_dVe = (1.0 - T1 - T2) * dVfb_dVe - T1 * dVbseff_dVe; dVfbeff_dVrg = T1 * dVgs_eff_dVg; /*fix below expression Wagner */ /*if (selfheat) dVfbeff_dT = (1.0 - T1 - T2) * dVfb_dT; - T1*dVbseff_dT; */ if (selfheat) dVfbeff_dT = (1.0 - T1 - T2) * dVfb_dT + T1*(dVgs_eff_dT-dVbseff_dT); else dVfbeff_dT = 0.0; Qac0 = CoxWLb * (Vfbeff - Vfb); dQac0_dVrg = CoxWLb * dVfbeff_dVrg; dQac0_dVd = CoxWLb * (dVfbeff_dVd - dVfb_dVd); dQac0_dVb = CoxWLb * (dVfbeff_dVb - dVfb_dVb); /* LFW_FD add next 2 derivatives */ dQac0_dVg = CoxWLb * (dVfbeff_dVg - dVfb_dVg); dQac0_dVe = CoxWLb * (dVfbeff_dVe - dVfb_dVe); if (selfheat) dQac0_dT = CoxWLb * (dVfbeff_dT - dVfb_dT); else dQac0_dT = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' Jun 09 */ ( here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { /* Vfb2 = Vfb + 1.12; */ Vfb2 = Vfb + eggbcp2; /* bugfix 4.3.1 -Tanvir */ dVfb2_dVb = dVfb_dVb; dVfb2_dVd = dVfb_dVd; /* LFW_FD add next 2 derivatives */ dVfb2_dVg = dVfb_dVg; dVfb2_dVe = dVfb_dVe; dVfb2_dT = dVfb_dT; DELTA_3_SOI2 = DELTA_3_SOI; V3 = Vfb2 - Vgs_eff2 + Vbseff - DELTA_3_SOI2; if (Vfb2 <= 0.0) { T0 = sqrt(V3 * V3 - 100.0 * DELTA_3_SOI2 * Vfb2); T2 = -25.0 * DELTA_3_SOI2 / T0; } else { T0 = sqrt(V3 * V3 + 100.0 * DELTA_3_SOI2 * Vfb2); T2 = 25.0 * DELTA_3_SOI2 / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff2 = Vfb2 - 0.5 * (V3 + T0); /* LFW_FD fix/add next 4 derivatives */ dVfbeff2_dVg = (1.0 - T2) * dVfb2_dVg - T1 * (dVfb2_dVg - 
dVgs_eff2_dVg + dVbseff_dVg); dVfbeff2_dVd = (1.0 - T2) * dVfb2_dVd - T1 * (dVfb2_dVd + dVbseff_dVd); dVfbeff2_dVb = (1.0 - T2) * dVfb2_dVb - T1 * (dVfb2_dVb + dVbseff_dVb); dVfbeff2_dVe = (1.0 - T2) * dVfb2_dVe - T1 * (dVfb2_dVe + dVbseff_dVe); dVfbeff2_dVrg = T1 * dVgs_eff2_dVg; /*fix below expression Wagner */ /*if (selfheat) dVfbeff2_dT = (1.0 - T1 - T2) * dVfb2_dT; */ if (selfheat) dVfbeff2_dT = (1.0 - T1 - T2) * dVfb2_dT - T1*dVfbeff2_dT; else dVfbeff2_dT = 0.0; Qac0 += CoxWLb2 * (Vfbeff2 - Vfb2); dQac02_dVrg = CoxWLb2 * dVfbeff2_dVrg; dQac02_dVd = CoxWLb2 * (dVfbeff2_dVd - dVfb2_dVd); dQac02_dVb = CoxWLb2 * (dVfbeff2_dVb - dVfb2_dVb); /* LFW_FD add next 2 derivatives */ dQac02_dVg = CoxWLb2 * (dVfbeff2_dVg - dVfb2_dVg); dQac02_dVe = CoxWLb2 * (dVfbeff2_dVe - dVfb2_dVe); if (selfheat) dQac02_dT = CoxWLb2 * (dVfbeff2_dT - dVfb2_dT); else dQac02_dT = 0.0; dQac0_dT += dQac02_dT; /* new line Wagner */ } /* end v4.1 */ T0 = 0.5 * pParam->B4SOIk1ox; T3 = Vgs_eff - Vfbeff - Vbseff - Vgsteff; if (pParam->B4SOIk1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->B4SOIk1ox; T2 = CoxWLb; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLb * T0 / T1; } Qsub0 = CoxWLb * pParam->B4SOIk1ox * (T1 - T0); /* 4.1 bug fix */ dQsub0_dVrg = T2 * (dVgs_eff_dVg - dVfbeff_dVrg); /* LFW_FD fix/add next 4 derivatives */ dQsub0_dVd = -T2 * (dVfbeff_dVd + dVbseff_dVd + dVgsteff_dVd); dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVbseff_dVg - dVgsteff_dVg); dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseff_dVb + dVgsteff_dVb); dQsub0_dVe = -T2 * (dVfbeff_dVe + dVbseff_dVe + dVgsteff_dVe); /*fix below expression Wagner */ /*if (selfheat) dQsub0_dT = -T2 * dVfbeff_dT; */ if (selfheat) dQsub0_dT = -T2 * (-dVgs_eff_dT + dVfbeff_dT + dVbseff_dT + dVgsteff_dT); else dQsub0_dT = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T3 = Vgs_eff2- Vfbeff2 - 
Vbseff - Vgsteff2; if (T3 < 0.0) { T1 = T0 + T3 / pParam->B4SOIk1ox; T2 = CoxWLb2; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLb2 * T0 / T1; } Qsub0 += CoxWLb2 * pParam->B4SOIk1ox * (T1 - T0); dQsub02_dVrg = T2 * (dVgs_eff2_dVg - dVfbeff2_dVrg); /* LFW_FD fix/add next 4 derivatives */ dQsub02_dVg = T2 * (dVgs_eff2_dVg - dVfbeff2_dVg - dVbseff_dVg - dVgsteff2_dVg); dQsub02_dVd = -T2 * ( dVfbeff2_dVd + dVbseff_dVd + dVgsteff2_dVd); dQsub02_dVb = -T2 * ( dVfbeff2_dVb + dVbseff_dVb + dVgsteff2_dVb); dQsub02_dVe = -T2 * ( dVfbeff2_dVe + dVbseff_dVe + dVgsteff2_dVe); /*fix below expression Wagner */ /*if (selfheat) dQsub02_dT = -T2 * dVfbeff2_dT; */ if (selfheat) dQsub02_dT = -T2 * (dVfbeff2_dT + dVbseff_dT + dVgsteff2_dT); else dQsub02_dT = 0.0; dQsub0_dT += dQsub02_dT; /* new line Wagner */ } } /* v3.1 */ AbulkCV = Abulk0 * pParam->B4SOIabulkCVfactor; /* LFW_FD add next 3 derivatives */ dAbulkCV_dVg = pParam->B4SOIabulkCVfactor * dAbulk0_dVg; dAbulkCV_dVd = pParam->B4SOIabulkCVfactor * dAbulk0_dVd; dAbulkCV_dVe = pParam->B4SOIabulkCVfactor * dAbulk0_dVe; dAbulkCV_dVb = pParam->B4SOIabulkCVfactor * dAbulk0_dVb; dAbulkCV_dT = dAbulk0_dT * pParam->B4SOIabulkCVfactor; /* new line Wagner */ VdsatCV = Vgsteff / AbulkCV; /* LFW_FD fix/add next 4 derivatives */ dVdsatCV_dVg = (dVgsteff_dVg -VdsatCV * dAbulkCV_dVg) / AbulkCV; dVdsatCV_dVd = (dVgsteff_dVd -VdsatCV * dAbulkCV_dVd) / AbulkCV; dVdsatCV_dVb = (dVgsteff_dVb -VdsatCV * dAbulkCV_dVb) / AbulkCV; dVdsatCV_dVe = (dVgsteff_dVe -VdsatCV * dAbulkCV_dVe) / AbulkCV; V4 = VdsatCV - Vds - DELTA_4; T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV); VdseffCV = VdsatCV - 0.5 * (V4 + T0); T1 = 0.5 * (1.0 + V4 / T0); T2 = DELTA_4 / T0; T3 = (1.0 - T1 - T2) / AbulkCV; /* LFW_FD fix/add next 4 derivatives */ dVdseffCV_dVg = ( 1.0 - T1 - T2) * dVdsatCV_dVg; dVdseffCV_dVd = ( 1.0 - T1 - T2) * dVdsatCV_dVd + T1; dVdseffCV_dVb = ( 1.0 - T1 - T2) * dVdsatCV_dVb; dVdseffCV_dVe = ( 1.0 - T1 - T2) * dVdsatCV_dVe; /* 10 new lines Wagner */ if 
(selfheat) { dVdsatCV_dT = dVgsteff_dT/AbulkCV -VdsatCV*dAbulkCV_dT/AbulkCV; dTL1_dT = (V4 + 2.0 * DELTA_4) * dVdsatCV_dT / T0; dVdseffCV_dT = 0.5*dVdsatCV_dT - 0.5*dTL1_dT; } else { dVdsatCV_dT = 0; dVdseffCV_dT = 0; } /* v4.1 */ if (here->B4SOIagbcp2 > 0) { VdsatCV2 = Vgsteff2 / AbulkCV; /* LFW_FD fix/add next 4 derivatives */ dVdsatCV2_dVg = (dVgsteff2_dVg - VdsatCV2 * dAbulkCV_dVg) / AbulkCV; dVdsatCV2_dVd = (dVgsteff2_dVd - VdsatCV2 * dAbulkCV_dVd) / AbulkCV; dVdsatCV2_dVb = (dVgsteff2_dVb - VdsatCV2 * dAbulkCV_dVb) / AbulkCV; dVdsatCV2_dVe = (dVgsteff2_dVe - VdsatCV2 * dAbulkCV_dVe) / AbulkCV; V4 = VdsatCV2 - Vds - DELTA_4; T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV2); VdseffCV2 = VdsatCV2 - 0.5 * (V4 + T0); T1 = 0.5 * (1.0 + V4 / T0); T2 = DELTA_4 / T0; T3 = (1.0 - T1 - T2) / AbulkCV; /* LFW_FD fix/add next 4 derivatives */ dVdseffCV2_dVg = (1.0 - T1 - T2 ) * dVdsatCV2_dVg; dVdseffCV2_dVd = (1.0 - T1 - T2 ) * dVdsatCV2_dVd + T1; dVdseffCV2_dVb = (1.0 - T1 - T2 ) * dVdsatCV2_dVb; dVdseffCV2_dVe = (1.0 - T1 - T2 ) * dVdsatCV2_dVe; /* 10 new lines Wagner */ if (selfheat) { dVdsatCV2_dT = dVgsteff2_dT/AbulkCV -VdsatCV2*dAbulkCV_dT/AbulkCV; dTL1_dT = (V4 + 2.0 * DELTA_4) * dVdsatCV2_dT / T0; dVdseffCV2_dT = 0.5*dVdsatCV2_dT - 0.5*dTL1_dT; } else { dVdsatCV2_dT = 0; dVdseffCV2_dT = 0; } } /* end v4.1 */ /* v3.1 */ Cbg12 = Cbd12 = Cbb12 = Cbe12 = 0; /* LFW_FD flexilint */ dqbulk_dT = 0; /* new line Wagner */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { qbulk = Cbg1 = Cbd1 = Cbb1 = Cbe1 = 0; /* LFW_FD enhance 2 lines */ Cbg12 = Cbd12 = Cbb12 = Cbe12 = 0; /* v4.1 */ } else { T0 = AbulkCV * VdseffCV; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1e-20); T2 = VdseffCV / T1; T3 = T0 * T2; T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV); T5 = (6.0 * T0 * (4.0 * Vgsteff- T0) / (T1 * T1) - 0.5); T6 = 12.0 * T2 * T2 * Vgsteff; T7 = 1.0 - AbulkCV; qbulk = CoxWLb * T7 * (0.5 * VdseffCV - T3); T4 = -T7 * (T4 - 1.0); T5 = -T7 * T5; T6 = -(T7 * T6 + (0.5 * VdseffCV - T3)); /* LFW_FD fix 
next 3 lines with next 20 lines */ /* Cbg1 = CoxWLb * (T4 + T5 * dVdseffCV_dVg); */ /* Cbd1 = CoxWLb * T5 * dVdseffCV_dVd ; */ /* Cbb1 = CoxWLb * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb);*/ dT0_dVg = AbulkCV * dVdseffCV_dVg + dAbulkCV_dVg * VdseffCV; dT0_dVd = AbulkCV * dVdseffCV_dVd + dAbulkCV_dVd * VdseffCV; dT0_dVb = AbulkCV * dVdseffCV_dVb + dAbulkCV_dVb * VdseffCV; dT0_dVe = AbulkCV * dVdseffCV_dVe + dAbulkCV_dVe * VdseffCV; dT1_dVg = 12.0 * (dVgsteff_dVg - 0.5 * dT0_dVg); dT1_dVd = 12.0 * (dVgsteff_dVd - 0.5 * dT0_dVd); dT1_dVb = 12.0 * (dVgsteff_dVb - 0.5 * dT0_dVb); dT1_dVe = 12.0 * (dVgsteff_dVe - 0.5 * dT0_dVe); Cbg1 = CoxWLb * (T7 * (0.5 - T0 / T1) * dVdseffCV_dVg - T7 * VdseffCV * ((dT0_dVg - T0 * dT1_dVg / T1) / T1) - dAbulkCV_dVg * (0.5 * VdseffCV - T3) ); Cbd1 = CoxWLb * (T7 * (0.5 - T0 / T1) * dVdseffCV_dVd - T7 * VdseffCV * ((dT0_dVd - T0 * dT1_dVd / T1) / T1) - dAbulkCV_dVd * (0.5 * VdseffCV - T3) ); Cbb1 = CoxWLb * (T7 * (0.5 - T0 / T1) * dVdseffCV_dVb - T7 * VdseffCV * ((dT0_dVb - T0 * dT1_dVb / T1) / T1) - dAbulkCV_dVb * (0.5 * VdseffCV - T3) ); Cbe1 = CoxWLb * (T7 * (0.5 - T0 / T1) * dVdseffCV_dVe - T7 * VdseffCV * ((dT0_dVe - T0 * dT1_dVe / T1) / T1) - dAbulkCV_dVe * (0.5 * VdseffCV - T3) ); /* 10 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = 12.0 * (dVgsteff_dT -0.5 * dTL1_dT); dTL3_dT = (dVdseffCV_dT - T2 * dTL2_dT) / T1; dTL4_dT = T0 * dTL3_dT + dTL1_dT * T2; dqbulk_dT = CoxWLb * (-dAbulk_dT * (0.5 * VdseffCV - T3) + T7 * (0.5 * dVdseffCV_dT - dTL4_dT)); } else dqbulk_dT = 0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T0 = AbulkCV * VdseffCV2; T1 = 12.0 * (Vgsteff2 - 0.5 * T0 + 1e-20); T2 = VdseffCV2 / T1; T3 = T0 * T2; T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV); T5 = (6.0 * T0 * (4.0 * Vgsteff2 - T0) / (T1 * T1) - 0.5); T6 = 12.0 * T2 * T2 * Vgsteff2; T7 = 1.0 
- AbulkCV; qbulk += CoxWLb2 * T7 * (0.5 * VdseffCV2 - T3); T4 = -T7 * (T4 - 1.0); T5 = -T7 * T5; T6 = -(T7 * T6 + (0.5 * VdseffCV2 - T3)); /* LFW_FD fix next 3 lines with next 20 lines */ /* Cbg12 = CoxWLb2 * (T4 + T5 * dVdseffCV2_dVg);*/ /* Cbd12 = CoxWLb2 * T5 * dVdseffCV2_dVd ; */ /* Cbb12 = CoxWLb2 * (T5 * dVdseffCV2_dVb + T6 * dAbulkCV_dVb);*/ dT0_dVg = AbulkCV * dVdseffCV2_dVg + dAbulkCV_dVg * VdseffCV2; dT0_dVd = AbulkCV * dVdseffCV2_dVd + dAbulkCV_dVd * VdseffCV2; dT0_dVb = AbulkCV * dVdseffCV2_dVb + dAbulkCV_dVb * VdseffCV2; dT0_dVe = AbulkCV * dVdseffCV2_dVe + dAbulkCV_dVe * VdseffCV2; dT1_dVg = 12.0 * (dVgsteff2_dVg - 0.5 * dT0_dVg); dT1_dVd = 12.0 * (dVgsteff2_dVd - 0.5 * dT0_dVd); dT1_dVb = 12.0 * (dVgsteff2_dVb - 0.5 * dT0_dVb); dT1_dVe = 12.0 * (dVgsteff2_dVe - 0.5 * dT0_dVe); Cbg12 = CoxWLb2 * (T7 * (0.5 - T0 / T1) * dVdseffCV2_dVg - T7 * VdseffCV2 * ((dT0_dVg - T0 * dT1_dVg / T1) / T1) - dAbulkCV_dVg * (0.5 * VdseffCV2 - T3) ); Cbd12 = CoxWLb2 * (T7 * (0.5 - T0 / T1) * dVdseffCV2_dVd - T7 * VdseffCV2 * ((dT0_dVd - T0 * dT1_dVd / T1) / T1) - dAbulkCV_dVd * (0.5 * VdseffCV2 - T3) ); Cbb12 = CoxWLb2 * (T7 * (0.5 - T0 / T1) * dVdseffCV2_dVb - T7 * VdseffCV2 * ((dT0_dVb - T0 * dT1_dVb / T1) / T1) - dAbulkCV_dVb * (0.5 * VdseffCV2 - T3) ); Cbe12 = CoxWLb2 * (T7 * (0.5 - T0 / T1) * dVdseffCV2_dVe - T7 * VdseffCV2 * ((dT0_dVe - T0 * dT1_dVe / T1) / T1) - dAbulkCV_dVe * (0.5 * VdseffCV2 - T3) ); /* 10 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = 12.0 * (dVgsteff2_dT -0.5 * dTL1_dT); dTL3_dT = (dVdseffCV2_dT - T2 * dTL2_dT) / T1; dTL4_dT = T0 * dTL3_dT + dTL1_dT * T2; dqbulk_dT += CoxWLb2 * (-dAbulk_dT * (0.5 * VdseffCV2 - T3) + T7 * (0.5 * dVdseffCV2_dT - dTL4_dT)); } else dqbulk_dT += 0; } /* end v4.1 */ } /* v3.1 */ /* Total inversion charge */ T0 = AbulkCV * VdseffCV; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1e-20); /* T2 = VdseffCV / T1; */ T2 = T0 / T1; T3 = T0 * T2; /* T4 = (1.0 - 12.0 * T2 * T2 * 
AbulkCV); T5 = (6.0 * T0 * (4.0 * Vgsteff - T0) / (T1 * T1) - 0.5); T6 = 12.0 * T2 * T2 * Vgsteff; */ T4 = (1.0 - 12.0 * T2 * T2);/*bug fix */ T7 = T2 * (2.0 + 6.0 * T2) - 0.5; /*bug fix */ T5 = T7 * AbulkCV; T6 = T7 * VdseffCV; /* qinv = CoxWL * (Vgsteff - 0.5 * VdseffCV + T3); */ qgate = qinv = CoxWL * (Vgsteff - 0.5 * T0 + T3); /* enhanced line Wagner */ here->B4SOIqinv = -qinv; /* for noise v3.2 */ /* LFW_FD fix next 3 lines with next 20 lines */ /* Cgg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); */ /* Cgd1 = CoxWL * T5 * dVdseffCV_dVd; */ /* Cgb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb);*/ dT0_dVg = dAbulkCV_dVg * VdseffCV + AbulkCV * dVdseffCV_dVg; dT0_dVd = dAbulkCV_dVd * VdseffCV + AbulkCV * dVdseffCV_dVd; dT0_dVb = dAbulkCV_dVb * VdseffCV + AbulkCV * dVdseffCV_dVb; dT0_dVe = dAbulkCV_dVe * VdseffCV + AbulkCV * dVdseffCV_dVe; dT1_dVg = 12.0 * (dVgsteff_dVg - 0.5 * dT0_dVg); dT1_dVd = 12.0 * (dVgsteff_dVd - 0.5 * dT0_dVd); dT1_dVb = 12.0 * (dVgsteff_dVb - 0.5 * dT0_dVb); dT1_dVe = 12.0 * (dVgsteff_dVe - 0.5 * dT0_dVe); dT2_dVg = (dT0_dVg - T2 * dT1_dVg) / T1; dT2_dVd = (dT0_dVd - T2 * dT1_dVd) / T1; dT2_dVb = (dT0_dVb - T2 * dT1_dVb) / T1; dT2_dVe = (dT0_dVe - T2 * dT1_dVe) / T1; dT3_dVg = dT0_dVg * T2 + T0 * dT2_dVg; dT3_dVd = dT0_dVd * T2 + T0 * dT2_dVd; dT3_dVb = dT0_dVb * T2 + T0 * dT2_dVb; dT3_dVe = dT0_dVe * T2 + T0 * dT2_dVe; Cgg1 = CoxWL * (dVgsteff_dVg - 0.5 * dT0_dVg + dT3_dVg); Cgd1 = CoxWL * (dVgsteff_dVd - 0.5 * dT0_dVd + dT3_dVd); Cgb1 = CoxWL * (dVgsteff_dVb - 0.5 * dT0_dVb + dT3_dVb); Cge1 = CoxWL * (dVgsteff_dVe - 0.5 * dT0_dVe + dT3_dVe); /* 7 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = 12 * (dVgsteff_dT - 0.5*dTL1_dT); dTL3_dT = (2 * T0 * dTL1_dT - T3 * dTL2_dT) / T1; dqgate_dT = CoxWL * (dVgsteff_dT - 0.5* dTL1_dT + dTL3_dT); } else dqgate_dT = 0; /* v4.1 */ /* LFW_FD 2 new lines per flexilint */ T12 = T02 = Cgg12 = Cgd12 = Cgb12 = Cge12 = 0.0; Csg12 = Csd12 = Csb12 = Cse12 
= 0.0; dqsrc2_dT = 0; /* new line Wagner */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T02 = AbulkCV * VdseffCV2; T12 = 12.0 * (Vgsteff2 - 0.5 * T02 + 1e-20); T2 = T02 / T12; T3 = T02 * T2; T4 = (1.0 - 12.0 * T2 * T2); T7 = T2 * (2.0 + 6.0 * T2) - 0.5; T5 = T7 * AbulkCV; T6 = T7 * VdseffCV2; qinv += CoxWL2 * (Vgsteff2 - 0.5 * T02 + T3); qgate = qinv; /* new line Wagner */ here->B4SOIqinv = -qinv; /* LFW_FD fix next 3 lines with next 20 lines */ /* Cgg12 = CoxWL2 * (T4 + T5 * dVdseffCV2_dVg); */ /* Cgd12 = CoxWL2 * T5 * dVdseffCV2_dVd; */ /* Cgb12 = CoxWL2 * (T5 * dVdseffCV2_dVb + T6 * dAbulkCV_dVb);*/ dT02_dVg = dAbulkCV_dVg * VdseffCV2 + AbulkCV * dVdseffCV2_dVg; dT02_dVd = dAbulkCV_dVd * VdseffCV2 + AbulkCV * dVdseffCV2_dVd; dT02_dVb = dAbulkCV_dVb * VdseffCV2 + AbulkCV * dVdseffCV2_dVb; dT02_dVe = dAbulkCV_dVe * VdseffCV2 + AbulkCV * dVdseffCV2_dVe; dT12_dVg = 12.0 * (dVgsteff2_dVg - 0.5 * dT02_dVg); dT12_dVd = 12.0 * (dVgsteff2_dVd - 0.5 * dT02_dVd); dT12_dVb = 12.0 * (dVgsteff2_dVb - 0.5 * dT02_dVb); dT12_dVe = 12.0 * (dVgsteff2_dVe - 0.5 * dT02_dVe); dT2_dVg = (dT02_dVg - T2 * dT12_dVg) / T12; dT2_dVd = (dT02_dVd - T2 * dT12_dVd) / T12; dT2_dVb = (dT02_dVb - T2 * dT12_dVb) / T12; dT2_dVe = (dT02_dVe - T2 * dT12_dVe) / T12; dT3_dVg = dT02_dVg * T2 + T02 * dT2_dVg; dT3_dVd = dT02_dVd * T2 + T02 * dT2_dVd; dT3_dVb = dT02_dVb * T2 + T02 * dT2_dVb; dT3_dVe = dT02_dVe * T2 + T02 * dT2_dVe; Cgg12 = CoxWL2 * (dVgsteff2_dVg - 0.5 * dT02_dVg + dT3_dVg); Cgd12 = CoxWL2 * (dVgsteff2_dVd - 0.5 * dT02_dVd + dT3_dVd); Cgb12 = CoxWL2 * (dVgsteff2_dVb - 0.5 * dT02_dVb + dT3_dVb); Cge12 = CoxWL2 * (dVgsteff2_dVe - 0.5 * dT02_dVe + dT3_dVe); /* 8 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = 12 * (dVgsteff2_dT - 0.5*dTL1_dT); dTL3_dT = (2 * T02 * dTL1_dT - T3 * dTL2_dT) / T12; dqgate2_dT = CoxWL2 * 
(dVgsteff2_dT - 0.5* dTL1_dT + dTL3_dT); dqgate_dT += dqgate2_dT; } else dqgate_dT = 0; } /* end v4.1 */ /* Inversion charge partitioning into S / D */ if (model->B4SOIxpart > 0.5) { /* 0/100 Charge partition model */ T1 = T1 + T1; qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0 - T0 * T0 / T1); T7 = (4.0 * Vgsteff - T0) / (T1 * T1); T4 = -(0.5 + 24.0 * T0 * T0 / (T1 * T1)); T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T0 * T7); T6 = -(0.25 * VdseffCV - 12.0 * T0 * VdseffCV * T7); /* LFW_FD fix next 3 lines with next 12 lines */ /* Csg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); */ /* Csd1 = CoxWL * T5 * dVdseffCV_dVd; */ /* Csb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb);*/ dT1_dVg = 2.0 * dT1_dVg; dT1_dVd = 2.0 * dT1_dVd; dT1_dVb = 2.0 * dT1_dVb; dT1_dVe = 2.0 * dT1_dVe; Csg1 = -CoxWL * (0.5 * dVgsteff_dVg + 0.25 * dT0_dVg - 2.0 * T0 * dT0_dVg / T1 + T0 * T0 * dT1_dVg / (T1 * T1)); Csd1 = -CoxWL * (0.5 * dVgsteff_dVd + 0.25 * dT0_dVd - 2.0 * T0 * dT0_dVd / T1 + T0 * T0 * dT1_dVd / (T1 * T1)); Csb1 = -CoxWL * (0.5 * dVgsteff_dVb + 0.25 * dT0_dVb - 2.0 * T0 * dT0_dVb / T1 + T0 * T0 * dT1_dVb / (T1 * T1)); Cse1 = -CoxWL * (0.5 * dVgsteff_dVe + 0.25 * dT0_dVe - 2.0 * T0 * dT0_dVe / T1 + T0 * T0 * dT1_dVe / (T1 * T1)); /* 8 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = 24 * (dVgsteff_dT - 0.5*dTL1_dT); dqsrc_dT = -CoxWL*(0.5*dVgsteff_dT + 0.25*dTL1_dT - 2*T0*dTL1_dT/T1 + + T0*T0*dTL2_dT/(T1*T1) ); } else dqsrc_dT = 0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T12 = T12 + T12; /*fix below expression Wagner */ /*qsrc += -CoxWL2 * (0.5 * Vgsteff2 + 0.25 * T02 - T02 * T02 / T12); */ qsrc2 = -CoxWL2 * (0.5 * Vgsteff2 + 0.25 * T02 - T02 * T02 / T12); T7 = (4.0 * Vgsteff2 - T02) / (T12 * T12); T4 = -(0.5 + 24.0 * T02 * T02 / (T12 * T12)); T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T02 * T7); T6 = 
-(0.25 * VdseffCV2 - 12.0 * T02 * VdseffCV2 * T7); /* LFW_FD fix next 3 lines with next 12 lines */ /* Csg12 = CoxWL2 * (T4 + T5 * dVdseffCV2_dVg); */ /* Csd12 = CoxWL2 * T5 * dVdseffCV2_dVd; */ /* Csb12 = CoxWL2 * (T5 * dVdseffCV2_dVb + T6 * dAbulkCV_dVb);*/ dT12_dVg = 2.0 * dT12_dVg; dT12_dVd = 2.0 * dT12_dVd; dT12_dVb = 2.0 * dT12_dVb; dT12_dVe = 2.0 * dT12_dVe; Csg12 = -CoxWL2 * (0.5 * dVgsteff2_dVg + 0.25 * dT02_dVg - 2.0 * T02 * dT02_dVg / T12 + T02 * T02 * dT12_dVg / (T12 * T12)); Csd12 = -CoxWL2 * (0.5 * dVgsteff2_dVd + 0.25 * dT02_dVd - 2.0 * T02 * dT02_dVd / T12 + T02 * T02 * dT12_dVd / (T12 * T12)); Csb12 = -CoxWL2 * (0.5 * dVgsteff2_dVb + 0.25 * dT02_dVb - 2.0 * T02 * dT02_dVb / T12 + T02 * T02 * dT12_dVb / (T12 * T12)); Cse12 = -CoxWL2 * (0.5 * dVgsteff2_dVe + 0.25 * dT02_dVe - 2.0 * T02 * dT02_dVe / T12 + T02 * T02 * dT12_dVe / (T12 * T12)); /* 11 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = 24 * (dVgsteff2_dT - 0.5*dTL1_dT); dqsrc2_dT = -CoxWL2*(0.5*dVgsteff2_dT + 0.25*dTL1_dT - 2*T02*dTL1_dT/T12 + + T02*T02*dTL2_dT/(T12*T12) ); } else dqsrc2_dT = 0; qsrc += qsrc2; dqsrc_dT += dqsrc2_dT; } /* end v4.1 */ } else if (model->B4SOIxpart < 0.5) { /* 40/60 Charge partition model */ T1 = T1 / 12.0; T2 = 0.5 * CoxWL / (T1 * T1); T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T2 * T3; /* LFW_FD add next 28 lines of code */ dT1_dVg = dVgsteff_dVg - 0.5 * dT0_dVg; dT1_dVd = dVgsteff_dVd - 0.5 * dT0_dVd; dT1_dVb = dVgsteff_dVb - 0.5 * dT0_dVb; dT1_dVe = dVgsteff_dVe - 0.5 * dT0_dVe; dT2_dVg = - 2.0 * T2 * dT1_dVg / T1; dT2_dVd = - 2.0 * T2 * dT1_dVd / T1; dT2_dVb = - 2.0 * T2 * dT1_dVb / T1; dT2_dVe = - 2.0 * T2 * dT1_dVe / T1; dT3_dVg = dVgsteff_dVg * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) + Vgsteff * (4.0 * T0 *dT0_dVg /3 + dVgsteff_dVg * (Vgsteff - 4.0 * T0 / 3.0) + Vgsteff * (dVgsteff_dVg -4.0 * dT0_dVg / 3.0)) 
- 2.0 * T0 * T0 * dT0_dVg / 5.0; dT3_dVd = dVgsteff_dVd * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) + Vgsteff * (4.0 * T0 *dT0_dVd /3 + dVgsteff_dVd * (Vgsteff - 4.0 * T0 / 3.0) + Vgsteff * (dVgsteff_dVd -4.0 * dT0_dVd / 3.0)) - 2.0 * T0 * T0 * dT0_dVd / 5.0; dT3_dVb = dVgsteff_dVb * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) + Vgsteff * (4.0 * T0 *dT0_dVb /3 + dVgsteff_dVb * (Vgsteff - 4.0 * T0 / 3.0) + Vgsteff * (dVgsteff_dVb -4.0 * dT0_dVb / 3.0)) - 2.0 * T0 * T0 * dT0_dVb / 5.0; dT3_dVe = dVgsteff_dVe * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) + Vgsteff * (4.0 * T0 *dT0_dVe /3 + dVgsteff_dVe * (Vgsteff - 4.0 * T0 / 3.0) + Vgsteff * (dVgsteff_dVe -4.0 * dT0_dVe / 3.0)) - 2.0 * T0 * T0 * dT0_dVe / 5.0; Csg1 = - T2 * dT3_dVg - dT2_dVg * T3; Csd1 = - T2 * dT3_dVd - dT2_dVd * T3; Csb1 = - T2 * dT3_dVb - dT2_dVb * T3; Cse1 = - T2 * dT3_dVe - dT2_dVe * T3; /* 13 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = (dVgsteff_dT - 0.5*dTL1_dT); dTL3_dT = - CoxWL * dTL2_dT / (T1 * T1 * T1); dTL4_dT = dVgsteff_dT * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0) ) + Vgsteff * (4.0 * T0 * dTL1_dT /3.0 + dVgsteff_dT * (Vgsteff - 4.0 * T0 / 3.0) + Vgsteff * (dVgsteff_dT -4.0 * dTL1_dT / 3.0) ) - 2.0 * T0 * T0 * dTL1_dT / 5.0; dqsrc_dT = -T2*dTL4_dT - dTL3_dT*T3; } else dqsrc_dT = 0; /* LFW_FD delete next 10 lines of code */ /* T7 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0) */ /* + 0.4 * T0 * T0; */ /* T4 = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0 */ /* * Vgsteff - 8.0 * T0 / 3.0) */ /* + 2.0 * T0 * T0 / 3.0); */ /* T5 = (qsrc / T1 + T2 * T7) * AbulkCV; */ /* T6 = (qsrc / T1 * VdseffCV + T2 * T7 * VdseffCV);*/ /* Csg1 = T4 + T5 * dVdseffCV_dVg; */ /* Csd1 = T5 * dVdseffCV_dVd; */ /* Csb1 = T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb; */ /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ 
(here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 >0) { T12 = T12 /12.0; T2 = 0.5 * CoxWL2 / (T12 * T12); T3 = Vgsteff2 * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0)) - 2.0 * T02 * T02 * T02 / 15.0; qsrc2 = -T2 * T3; T7 = 4.0 / 3.0 * Vgsteff2 * (Vgsteff2 - T02) + 0.4 * T02 * T02; T4 = -2.0 * qsrc2 / T12 - T2 * (Vgsteff2 * (3.0 * Vgsteff2 - 8.0 * T02 / 3.0) + 2.0 * T02 * T02 / 3.0); T5 = (qsrc2 / T12 + T2 * T7) * AbulkCV; T6 = (qsrc2 / T12 * VdseffCV2 + T2 * T7 * VdseffCV2); /* LFW_FD fix next 3 lines with next 28 lines */ /* Csg12 = T4 + T5 * dVdseffCV2_dVg; */ /* Csd12 = T5 * dVdseffCV2_dVd; */ /* Csb12 = T5 * dVdseffCV2_dVb + T6 * dAbulkCV_dVb;*/ dT12_dVg = dVgsteff2_dVg - 0.5 * dT02_dVg; dT12_dVd = dVgsteff2_dVd - 0.5 * dT02_dVd; dT12_dVb = dVgsteff2_dVb - 0.5 * dT02_dVb; dT12_dVe = dVgsteff2_dVe - 0.5 * dT02_dVe; dT2_dVg = - 2.0 * T2 * dT12_dVg / T12; dT2_dVd = - 2.0 * T2 * dT12_dVd / T12; dT2_dVb = - 2.0 * T2 * dT12_dVb / T12; dT2_dVe = - 2.0 * T2 * dT12_dVe / T12; dT3_dVg = dVgsteff2_dVg * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0)) + Vgsteff2 * (4.0 * T02 *dT02_dVg /3 + dVgsteff2_dVg * (Vgsteff2 - 4.0 * T02 / 3.0) + Vgsteff2 * (dVgsteff2_dVg -4.0 * dT02_dVg / 3.0)) - 2.0 * T02 * T02 * dT02_dVg / 5.0; dT3_dVd = dVgsteff2_dVd * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0)) + Vgsteff2 * (4.0 * T02 *dT02_dVd /3 + dVgsteff2_dVd * (Vgsteff2 - 4.0 * T02 / 3.0) + Vgsteff2 * (dVgsteff2_dVd -4.0 * dT02_dVd / 3.0)) - 2.0 * T02 * T02 * dT02_dVd / 5.0; dT3_dVb = dVgsteff2_dVb * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0)) + Vgsteff2 * (4.0 * T02 *dT02_dVb /3 + dVgsteff2_dVb * (Vgsteff2 - 4.0 * T02 / 3.0) + Vgsteff2 * (dVgsteff2_dVb -4.0 * dT02_dVb / 3.0)) - 2.0 * T02 * T02 * dT02_dVb / 5.0; dT3_dVe = dVgsteff2_dVe * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0)) + Vgsteff2 * (4.0 * T02 *dT02_dVe /3 + dVgsteff2_dVe * (Vgsteff2 - 4.0 * T02 / 3.0) + Vgsteff2 * (dVgsteff2_dVe 
-4.0 * dT02_dVe / 3.0)) - 2.0 * T02 * T02 * dT02_dVe / 5.0; Csg12 = - T2 * dT3_dVg - dT2_dVg * T3; Csd12 = - T2 * dT3_dVd - dT2_dVd * T3; Csb12 = - T2 * dT3_dVb - dT2_dVb * T3; Cse12 = - T2 * dT3_dVe - dT2_dVe * T3; /* 13 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = (dVgsteff2_dT - 0.5*dTL1_dT); dTL3_dT = - CoxWL2 * dTL2_dT / (T12 * T12 * T12); dTL4_dT = dVgsteff2_dT * (2.0 * T02 * T02 / 3.0 + Vgsteff2 * (Vgsteff2 - 4.0 * T02 / 3.0) ) + Vgsteff2 * (4.0 * T02 * dTL1_dT /3.0 + dVgsteff2_dT * (Vgsteff2 - 4.0 * T02 / 3.0) + Vgsteff2 * (dVgsteff2_dT -4.0 * dTL1_dT / 3.0) ) - 2.0 * T02 * T02 * dTL1_dT /5.0; dqsrc2_dT = -T2*dTL4_dT - dTL3_dT*T3; } else dqsrc2_dT = 0; qsrc += qsrc2; dqsrc_dT += dqsrc2_dT; /* new line Wagner */ } /* end v4.1 */ } else { /* 50/50 Charge partition model */ qsrc = - 0.5 * (qinv + qbulk); Csg1 = - 0.5 * (Cgg1 + Cbg1); Csb1 = - 0.5 * (Cgb1 + Cbb1); Csd1 = - 0.5 * (Cgd1 + Cbd1); Cse1 = - 0.5 * (Cge1 + Cbe1); /* LFW_FD new line */ /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 >0) { Csg12 = -0.5 * (Cgg12 + Cbg12); Csb12 = -0.5 * (Cgb12 + Cbb12); Csd12 = -0.5 * (Cgd12 + Cbd12); Cse12 = -0.5 * (Cge12 + Cbe12); /* LFW_FD new line */ } dqsrc_dT = -0.5 * (dqgate_dT + dqbulk_dT); /* new line Wagner */ /* end v4.1 */ } /* Backgate charge */ /* v3.1 */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { Qe1 = dQe1_dVb = dQe1_dVe = Ce1T = dQe1_dT = 0; /* enhanced line Wagner */ } else /* soiMod = 0 or 1 */ { CboxWL = pParam->B4SOIkb1 * model->B4SOIfbody * Cbox * (pParam->B4SOIweffCV / here->B4SOInseg * here->B4SOInf /* bugfix_snps nf*/ * pParam->B4SOIleffCVbg + here->B4SOIaebcp); Qe1 = CboxWL * (Vesfb - Vbs); dQe1_dVb = -CboxWL; dQe1_dVe = CboxWL; if (selfheat) Ce1T = dQe1_dT = -CboxWL * dvfbb_dT; /* enhanced line Wagner */ else dQe1_dT = 0; } /* v3.1 */ qgate = qinv + Qac0 + 
Qsub0; /* LFW_FD commentary only; next 2 lines */ /* Correct definition of qgate below. Not used because it changes CMC defined model.*/ /* qgate = qinv + Qac0 + Qsub0 - qbulk;*/ qbody = (qbulk - Qac0 - Qsub0 - Qe1); qsub = Qe1; qdrn = -(qgate + qsrc + qbody + qsub); /* 4 new lines Wagner */ dqgate_dT = dqgate_dT + dQac0_dT + dQsub0_dT; dqbody_dT = (dqbulk_dT - dQac0_dT - dQsub0_dT - dQe1_dT); dqsub_dT = dQe1_dT; dqdrn_dT = -(dqgate_dT + dqsrc_dT + dqbody_dT + dqsub_dT); /* This transform all the dependency on Vgsteff, Vbseff into real ones */ Ce1b = dQe1_dVb; Ce1e = dQe1_dVe; /* LFW_FD fix/add next 4 lines */ Csg = Csg1; Csd = Csd1; Csb = Csb1; Cse = Cse1; /*fix expression below Wagner */ /*if (selfheat) CsT = Csg1 * dVgsteff_dT;*/ if (selfheat) CsT = dqsrc_dT; else CsT = 0.0; /* LFW_FD fix/add next 4 lines */ Cgg = Cgg1 + dQsub0_dVg + dQac0_dVg; Cgd = Cgd1 + dQsub0_dVd + dQac0_dVd; Cgb = Cgb1 + dQsub0_dVb + dQac0_dVb; Cge = Cge1 + dQsub0_dVe + dQac0_dVe; /* LFW_FD commentary only; next 5 lines */ /* Use these with correct definition of qgate above */ /* Cgg = Cgg1 + dQsub0_dVg + dQac0_dVg - Cbg1; */ /* Cgd = Cgd1 + dQsub0_dVd + dQac0_dVd - Cbd1; */ /* Cgb = Cgb1 + dQsub0_dVb + dQac0_dVb - Cbb1; */ /* Cge = Cge1 + dQsub0_dVe + dQac0_dVe - Cbe1; */ if (selfheat) /*fix expression below Wagner */ /*CgT = (Cgg1 + dQsub0_dVg) * dVgsteff_dT + dQac0_dT + dQsub0_dT;*/ CgT = dqgate_dT; else CgT = 0.0; /* LFW_FD fix/add next 4 lines */ Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg; Cbd = Cbd1 - dQac0_dVd - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb - Ce1b; Cbe = Cbe1 - dQac0_dVe - dQsub0_dVe - Ce1e; if (selfheat) /*fix expression below Wagner */ /*CbT = (Cbg1 - dQsub0_dVg) * dVgsteff_dT - dQac0_dT - dQsub0_dT - dQe1_dT;*/ CbT = dqbody_dT; else CbT = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 >0) { /* LFW_FD fixed next 12 lines */ Csg += Csg12; Csd += 
Csd12; Csb += Csb12; Cse += Cse12; Cgg += Cgg12 + dQsub02_dVg + dQac02_dVg; Cgd += Cgd12 + dQsub02_dVd + dQac02_dVd; Cgb += Cgb12 + dQsub02_dVb + dQac02_dVb; Cge += Cge12 + dQsub02_dVe + dQac02_dVe; Cbg += Cbg12 - dQac02_dVg - dQsub02_dVg; Cbd += Cbd12 - dQac02_dVd - dQsub02_dVd; Cbb += Cbb12 - dQac02_dVb - dQsub02_dVb; Cbe += Cbe12 - dQac02_dVe - dQsub02_dVe; } /* end v4.1 */ here->B4SOIcggb = Cgg ; here->B4SOIcgsb = - (Cgg + Cgd + Cgb + Cge); /* LFW_FD fixed line */ here->B4SOIcgdb = Cgd; here->B4SOIcgeb = Cge; /* LFW_FD new line */ here->B4SOIcgT = CgT; here->B4SOIcbgb = Cbg; here->B4SOIcbsb = -(Cbg + Cbd + Cbb + Cbe); /* LFW_FD fixed line */ here->B4SOIcbdb = Cbd; here->B4SOIcbeb = Cbe; /* LFW_FD fixed line */ here->B4SOIcbT = CbT; here->B4SOIceeb = Ce1e ; here->B4SOIceT = dQe1_dT; here->B4SOIcdgb = -(Cgg + Cbg + Csg); here->B4SOIcddb = -(Cgd + Cbd + Csd); here->B4SOIcdeb = -(Cge + Cse + Cbe) - Ce1e; /* LFW_FD fixed line */ here->B4SOIcdT = -(CgT + CbT + CsT) - dQe1_dT; here->B4SOIcdsb = Cgg + Cgd + Cgb + Cge /* LFW_FD fixed expression */ + Cbg + Cbd + Cbb + Cbe + Ce1e + Csg + Csd + Csb + Cse + Ce1b; } /* End of if capMod == 2 */ else if (model->B4SOIcapMod == 3) { /* dVgsteff_dVb /= dVbseff_dVb; LFW_FD comment out line */ if(model->B4SOImtrlMod == 0) Cox = 3.453133e-11 / model->B4SOItoxp; else Cox = epsrox * EPS0 / model->B4SOItoxp; CoxWL *= toxe/ model->B4SOItoxp; CoxWLb *= model->B4SOItox/ model->B4SOItoxp; Tox=1.0e8*model->B4SOItoxp; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { /* dVgsteff2_dVb /= dVbseff_dVb; LFW_FD comment out line */ CoxWL2 *= model->B4SOItox / model->B4SOItoxp; CoxWLb2 *= model->B4SOItox/ model->B4SOItoxp; } /* end v4.1 */ /* v3.1 */ /* LFW_FD flexilint inits next 7 lines */ Vfbzb = pParam->B4SOIvfbzb + pParam->B4SOIdelvt; dVfbzb_dT = 0.0; Vfbzb2 = dVfbzb2_dT = 0.0; Tcen2 = dTcen2_dVg = dTcen2_dVd = dTcen2_dVb = dTcen2_dVe = dTcen2_dT = 0.0; Coxeff2 = dCoxeff2_dVg = dCoxeff2_dVd = dCoxeff2_dVb = dCoxeff2_dVe = dCoxeff2_dT = 0.0; CoxWLcenb2= 
dCoxWLcenb2_dT= 0.0; dDeltaPhi2_dT = 0.0; if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { /* LFW_FD enhance next 4 lines */ Qac0 = dQac0_dVg = dQac0_dVb = dQac0_dVd = dQac0_dVe = dQac0_dT = 0.0; dQac02_dVg = dQac02_dVb = dQac02_dVd = dQac02_dVe = dQac02_dT = 0.0; Qsub0 = dQsub0_dVg = dQsub0_dVd = dQsub0_dVb = dQsub0_dVe = dQsub0_dT = 0.0; dQsub02_dVg = dQsub02_dVd = dQsub02_dVb = dQsub02_dVe = dQsub02_dT = 0.0; Vfbzb = dVfbzb_dT = 0; /* v4.2 bug fix # 20 */ } else /* soiMod = 0 or 1 */ { if (selfheat) { Vfbzb = Vthzb - phi - pParam->B4SOIk1eff * sqrtPhi + pParam->B4SOIdelvt; /*fix expression below Wagner */ /*dVfbzb_dT = dVthzb_dT;*/ dVfbzb_dT = dVthzb_dT - dphi_dT - pParam->B4SOIk1eff*dsqrtPhi_dT; } else { Vfbzb = here->B4SOIvfbzb + pParam->B4SOIdelvt; dVfbzb_dT = 0; } V3 = Vfbzb - Vgs_eff + Vbseff - DELTA_3; if (Vfbzb <= 0.0) { T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * Vfbzb); T2 = -DELTA_3 / T0; /* dTL0_dT = (V3 * dTL3_dT - 2.0 * DELTA_3 * dVfbzb_dT) / T0; LFW_FD delete line */ } else { T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfbzb); T2 = DELTA_3 / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = Vfbzb - 0.5 * (V3 + T0); /* LFW_FD fix/add next 4 lines */ dVfbeff_dVg = T1 * (dVgs_eff_dVg - dVbseff_dVg); dVfbeff_dVd = T1 * ( - dVbseff_dVd); dVfbeff_dVb = T1 * ( - dVbseff_dVb); dVfbeff_dVe = T1 * ( - dVbseff_dVe); /*fix expression below Wagner */ /*if (selfheat) dVfbeff_dT = (1.0 - T1 - T2) * dVfbzb_dT; - T1*dVbseff_dT; */ if (selfheat) dVfbeff_dT = (1.0 - T1 - T2) * dVfbzb_dT + T1*(dVgs_eff_dT - dVbseff_dT); else dVfbeff_dT = 0.0; /* v4.1 */ if (here->B4SOIagbcp2 >0) { /* Vfbzb2 = Vfbzb + 1.12; */ Vfbzb2 = Vfbzb + eggbcp2; /* bugfix v4.3.1 -Tanvir */ if (selfheat) dVfbzb2_dT = dVfbzb_dT; else dVfbzb2_dT = 0; V3 = Vfbzb2 - Vgs_eff2 + Vbseff - DELTA_3; if (Vfbzb2 <= 0.0) /* Bug fix #12 Jun 09 Vfbzb changed to Vfbzb2 */ { T0 = sqrt(V3 * V3 - 100.0 * DELTA_3 * Vfbzb2); /* Value of 100 instead of 4 is used to make transition smooth*/ T2 = -25.0 * DELTA_3 / T0; /* p+/p has same 
smoothness as n+/p with 100, 4 makes it too steep*/ } else { T0 = sqrt(V3 * V3 + 100.0 * DELTA_3 * Vfbzb2); T2 = 25.0 * DELTA_3 / T0; } T1 = 0.5 * (1.0 + V3 / T0); Vfbeff2 = Vfbzb2 - 0.5 * (V3 + T0); /* LFW_FD fix/add next 4 lines */ dVfbeff2_dVg = T1 * (dVgs_eff2_dVg - dVbseff_dVg); dVfbeff2_dVd = T1 * ( - dVbseff_dVd); dVfbeff2_dVb = T1 * ( - dVbseff_dVb); dVfbeff2_dVe = T1 * ( - dVbseff_dVe); /*fix expression below Wagner */ /*if (selfheat) dVfbeff2_dT = (1.0 - T1 - T2) * dVfbzb2_dT;*/ if (selfheat) dVfbeff2_dT = (1.0 - T1 - T2) * dVfbzb2_dT - T1*dVbseff_dT; else dVfbeff2_dT = 0.0; } /* end v4.1 */ T0 = (Vgs_eff - Vbseff - Vfbzb) / Tox; /* LFW_FD fix/add next 4 lines */ dT0_dVg = (dVgs_eff_dVg - dVbseff_dVg) /Tox; dT0_dVd = - dVbseff_dVd /Tox; dT0_dVb = - dVbseff_dVb /Tox; dT0_dVe = - dVbseff_dVe /Tox; tmp = T0 * pParam->B4SOIacde; if ((-EXPL_THRESHOLD < tmp) && (tmp < EXPL_THRESHOLD)) { Tcen = pParam->B4SOIldeb * exp(tmp); /* LFW_FD fix/add next 5 lines */ TL1 = pParam->B4SOIacde * Tcen; dTcen_dVg = TL1 * dT0_dVg; dTcen_dVd = TL1 * dT0_dVd; dTcen_dVb = TL1 * dT0_dVb; dTcen_dVe = TL1 * dT0_dVe; if (selfheat) /* fix below expression Wagner */ /*dTcen_dT = -Tcen * pParam->B4SOIacde * dVfbzb_dT / Tox; */ dTcen_dT = Tcen * pParam->B4SOIacde * (dVgs_eff_dT-dVbseff_dT-dVfbzb_dT) / Tox; else dTcen_dT = 0; } else if (tmp <= -EXPL_THRESHOLD) { Tcen = pParam->B4SOIldeb * MIN_EXPL; dTcen_dVg = dTcen_dVb = dTcen_dVd = dTcen_dVe = dTcen_dT = 0.0; /* LFW_FD enhance line */ } else { Tcen = pParam->B4SOIldeb * MAX_EXPL; dTcen_dVg = dTcen_dVb = dTcen_dVd = dTcen_dVe = dTcen_dT = 0.0; /* LFW_FD enhance line */ } /*LINK = 1.0e-3 * (toxe - model->B4SOIdtoxcv); v2.2.3 */ LINK = 1.0e-3 * model->B4SOItoxp; V3 = pParam->B4SOIldeb - Tcen - LINK; V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->B4SOIldeb); Tcen = pParam->B4SOIldeb - 0.5 * (V3 + V4); T1 = 0.5 * (1.0 + V3 / V4); /* v4.1 small Tcen can introduce numerical issue */ if (Tcen < 1e-15) { Tcen = 1e-15; T1 = 0; } /* end */ dTcen_dVg *= 
T1; dTcen_dVb *= T1; dTcen_dVd *= T1; /* LFW_FD new line */ dTcen_dVe *= T1; /* LFW_FD new line */ if (selfheat) dTcen_dT *= T1; else dTcen_dT = 0; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { T0 = (Vgs_eff2 - Vbseff - Vfbzb2) / Tox; /* LFW_FD fix/add next 4 lines */ dT0_dVg = (dVgs_eff2_dVg - dVbseff_dVg) / Tox; dT0_dVd = -dVbseff_dVd / Tox; dT0_dVb = -dVbseff_dVb / Tox; dT0_dVe = -dVbseff_dVe / Tox; tmp = T0 * pParam->B4SOIacde; if ((-EXPL_THRESHOLD < tmp) && (tmp < EXPL_THRESHOLD)) { Tcen2 = pParam->B4SOIldeb * exp(tmp); /* LFW_FD fix/add next 4 lines */ dTcen2_dVg = pParam->B4SOIacde * Tcen2 * dT0_dVg; dTcen2_dVd = pParam->B4SOIacde * Tcen2 * dT0_dVd; dTcen2_dVb = pParam->B4SOIacde * Tcen2 * dT0_dVb; dTcen2_dVe = pParam->B4SOIacde * Tcen2 * dT0_dVe; if (selfheat) dTcen2_dT = -Tcen2 * pParam->B4SOIacde * dVfbzb2_dT / Tox; else dTcen2_dT = 0; } else if (tmp <= -EXPL_THRESHOLD) { Tcen2 = pParam->B4SOIldeb * MIN_EXPL; dTcen2_dVg = dTcen2_dVd = dTcen2_dVb = dTcen2_dVe = dTcen2_dT = 0.0; /* LFW_FD enhance line */ } else { Tcen2 = pParam->B4SOIldeb * MAX_EXPL; dTcen2_dVg = dTcen2_dVd = dTcen2_dVb = dTcen2_dVe = dTcen2_dT = 0.0; /* LFW_FD enhance line */ } V3 = pParam->B4SOIldeb - Tcen2 - LINK; V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->B4SOIldeb); Tcen2 = pParam->B4SOIldeb - 0.5 * (V3 + V4); T1 = 0.5 * (1.0 + V3 / V4); if (Tcen2 < 1e-15) { Tcen2 = 1e-15; T1 = 0; } dTcen2_dVg *= T1; dTcen2_dVb *= T1; dTcen2_dVd *= T1; /* LFW_FD new line */ dTcen2_dVe *= T1; /* LFW_FD new line */ if (selfheat) dTcen2_dT *= T1; else dTcen2_dT = 0; } /* end v4.1 */ Ccen = epssub / Tcen; T2 = Cox / (Cox + Ccen); Coxeff = T2 * Ccen; T3 = -Ccen / Tcen; /* LFW_FD fix/add next 5 lines */ TL1 = T2 * T2 * T3; dCoxeff_dVg = TL1 * dTcen_dVg; dCoxeff_dVd = TL1 * dTcen_dVd; dCoxeff_dVb = TL1 * dTcen_dVb; dCoxeff_dVe = TL1 * dTcen_dVe; if (selfheat) /*fix expression below Wagner */ /*dCoxeff_dT = T3 * dTcen_dT * (T2 - Coxeff / (Cox + Ccen));*/ dCoxeff_dT = - Coxeff * T2 * dTcen_dT / Tcen; else dCoxeff_dT = 
0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { /* Ccen2 = EPSSI / Tcen2; */ /* Bug Fix # 30 Jul09 EPSSI changed to epssub */ Ccen2 = epssub / Tcen2; T2 = Cox / (Cox + Ccen2); Coxeff2 = T2 * Ccen2; T3 = -Ccen2 / Tcen2; /* LFW_FD fix/add next 5 lines */ TL1 = T2 * T2 * T3; dCoxeff2_dVg = TL1 * dTcen2_dVg; dCoxeff2_dVd = TL1 * dTcen2_dVd; dCoxeff2_dVb = TL1 * dTcen2_dVb; dCoxeff2_dVe = TL1 * dTcen2_dVe; if (selfheat) /*fix expression below Wagner */ /*dCoxeff2_dT = T3 * dTcen2_dT * (T2 - Coxeff2 / (Cox + Ccen2));*/ dCoxeff2_dT = - Coxeff2 * T2 * dTcen2_dT / Tcen2; else dCoxeff2_dT = 0; } /* end v4.1 */ CoxWLcenb = CoxWLb * Coxeff / Cox; if (selfheat) dCoxWLcenb_dT = CoxWLb * dCoxeff_dT / Cox; else dCoxWLcenb_dT = 0; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { CoxWLcenb2 = CoxWLb2 * Coxeff2 / Cox; if (selfheat) dCoxWLcenb2_dT = CoxWLb2 * dCoxeff2_dT / Cox; else dCoxWLcenb2_dT = 0; } /* end v4.1 */ Qac0 = CoxWLcenb * (Vfbeff - Vfbzb); QovCox = Qac0 / Coxeff; /* LFW_FD fix/add next 4 lines */ dQac0_dVg = CoxWLcenb * dVfbeff_dVg + QovCox * dCoxeff_dVg; dQac0_dVb = CoxWLcenb * dVfbeff_dVb + QovCox * dCoxeff_dVb; dQac0_dVd = CoxWLcenb * dVfbeff_dVd + QovCox * dCoxeff_dVd; dQac0_dVe = CoxWLcenb * dVfbeff_dVe + QovCox * dCoxeff_dVe; if (selfheat) dQac0_dT = CoxWLcenb * (dVfbeff_dT - dVfbzb_dT) + dCoxWLcenb_dT * (Vfbeff - Vfbzb); else dQac0_dT = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { Qac02 = CoxWLcenb2 * (Vfbeff2 - Vfbzb2); QovCox2 = Qac02 / Coxeff2; /* LFW_FD fix/add next 4 lines */ dQac02_dVg = CoxWLcenb2 * dVfbeff2_dVg + QovCox2 * dCoxeff2_dVg; dQac02_dVd = CoxWLcenb2 * dVfbeff2_dVd + QovCox2 * dCoxeff2_dVd; dQac02_dVb = CoxWLcenb2 * dVfbeff2_dVb + QovCox2 * dCoxeff2_dVb; dQac02_dVe = CoxWLcenb2 * dVfbeff2_dVe + QovCox2 
* dCoxeff2_dVe; if (selfheat) dQac02_dT = CoxWLcenb2 * (dVfbeff2_dT - dVfbzb2_dT) + dCoxWLcenb2_dT * (Vfbeff2 - Vfbzb2); else dQac02_dT = 0.0; Qac0 += Qac02; dQac0_dT += dQac02_dT; /* new line Wagner */ } /* end v4.1 */ T0 = 0.5 * pParam->B4SOIk1ox; T3 = Vgs_eff - Vfbeff - Vbseff - Vgsteff; if (pParam->B4SOIk1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->B4SOIk1ox; T2 = CoxWLcenb; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLcenb * T0 / T1; } Qsub0 = CoxWLcenb * pParam->B4SOIk1ox * (T1 - T0); QovCox = Qsub0 / Coxeff; /* LFW_FD fix/add next 4 lines */ dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVbseff_dVg - dVgsteff_dVg) + QovCox * dCoxeff_dVg; dQsub0_dVd = -T2 * (dVfbeff_dVd + dVbseff_dVd + dVgsteff_dVd) + QovCox * dCoxeff_dVd; dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseff_dVb + dVgsteff_dVb) + QovCox * dCoxeff_dVb; dQsub0_dVe = -T2 * (dVfbeff_dVe + dVbseff_dVe + dVgsteff_dVe) + QovCox * dCoxeff_dVe; if (selfheat) /*fix 1st line of expression below Wagner */ /*dQsub0_dT = -T2 * (dVfbeff_dT + dVgsteff_dT)*/ dQsub0_dT = T2 * (dVgs_eff_dT - dVfbeff_dT - dVbseff_dT - dVgsteff_dT) + dCoxWLcenb_dT * pParam->B4SOIk1ox * (T1 - T0); else dQsub0_dT = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T3 = Vgs_eff2 - Vfbeff2 - Vbseff - Vgsteff2; if (pParam->B4SOIk1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->B4SOIk1ox; T2 = CoxWLcenb2; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLcenb2 * T0 / T1; } Qsub02 = CoxWLcenb2 * pParam->B4SOIk1ox * (T1 - T0); QovCox2 = Qsub02 / Coxeff2; /* LFW_FD fix/add next 4 lines */ dQsub02_dVg = T2 * (dVgs_eff2_dVg - dVfbeff2_dVg - dVbseff_dVg - dVgsteff2_dVg) + QovCox2 * dCoxeff2_dVg; dQsub02_dVd = -T2 * (dVfbeff2_dVd + dVbseff_dVd + dVgsteff2_dVd) + QovCox2 * dCoxeff2_dVd; dQsub02_dVb = -T2 * (dVfbeff2_dVb + dVbseff_dVb + dVgsteff2_dVb) + QovCox2 * 
dCoxeff2_dVb; dQsub02_dVe = -T2 * (dVfbeff2_dVe + dVbseff_dVe + dVgsteff2_dVe) + QovCox2 * dCoxeff2_dVe; if (selfheat) dQsub02_dT = -T2 * (dVfbeff2_dT + dVgsteff2_dT) + dCoxWLcenb2_dT * pParam->B4SOIk1ox * (T1 - T0); else dQsub02_dT = 0.0; Qsub0 += Qsub02; dQsub0_dT += dQsub02_dT; /* new line Wagner */ } /* end v4.1 */ } /* v3.1 */ /* Gate-bias dependent delta Phis begins */ if (pParam->B4SOIk1ox <= 0.0) { Denomi = 0.25 * pParam->B4SOImoin * Vtm; T0 = 0.5 * pParam->B4SOIsqrtPhi; } else { Denomi = pParam->B4SOImoin * Vtm * pParam->B4SOIk1ox * pParam->B4SOIk1ox; T0 = pParam->B4SOIk1ox * pParam->B4SOIsqrtPhi; } T1 = 2.0 * T0 + Vgsteff; DeltaPhi = Vtm * log(1.0 + T1 * Vgsteff / Denomi); /* LFW_FD fix/add next 5 lines */ dDeltaPhi_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff) * dVgsteff_dVg; dDeltaPhi_dVd = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff) * dVgsteff_dVd; dDeltaPhi_dVb = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff) * dVgsteff_dVb; dDeltaPhi_dVe = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff) * dVgsteff_dVe; DeltaPhi2 = dDeltaPhi2_dVg = dDeltaPhi2_dVd = dDeltaPhi2_dVb = dDeltaPhi2_dVe = 0.0; /* flexilint */ /* 7 new lines Wagner */ if (selfheat) { TL1 = 1.0 + T1 * Vgsteff / Denomi; dTL1_dT = (2*(T0+Vgsteff)*dVgsteff_dT/Denomi) - (T1 * Vgsteff / (Denomi*Vtm))*dVtm_dT; dDeltaPhi_dT = dVtm_dT * log(TL1) + (Vtm/TL1)*dTL1_dT; } else dDeltaPhi_dT = 0.0; /* v4.1 */ if (here->B4SOIagbcp2 > 0) { T1 = 2.0 * T0 + Vgsteff2; DeltaPhi2 = Vtm * log(1.0 + T1 * Vgsteff2 / Denomi); /* LFW_FD fix/add next 4 lines */ dDeltaPhi2_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff2) * dVgsteff2_dVg; dDeltaPhi2_dVd = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff2) * dVgsteff2_dVd; dDeltaPhi2_dVb = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff2) * dVgsteff2_dVb; dDeltaPhi2_dVe = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff2) * dVgsteff2_dVe; /* 7 new lines Wagner */ if (selfheat) { TL1 = 1.0 + T1 * Vgsteff2 / Denomi; dTL1_dT = (2*(T0+Vgsteff2)*dVgsteff2_dT/Denomi) - (T1 
* Vgsteff2 / (Denomi*Vtm))*dVtm_dT; dDeltaPhi2_dT = dVtm_dT * log(TL1) + (Vtm/TL1)*dTL1_dT; } else dDeltaPhi2_dT = 0.0; } /* end v4.1 */ /* End of delta Phis */ /* v3.1.1 bug fix for discontinuity */ T3 = 4.0 * (Vth - Vfbzb - phi); T2 = sqrt(T3*T3 + 0.0001); T5 = 0.5 * (1 + T3/T2); T4 = 0.5 * (T3 + T2); Tox += Tox; T0 = (Vgsteff + T4) / Tox; tmp = exp(0.7 * log(T0)); T1 = 1.0 + tmp; T2 = 0.7 * tmp / (T0 * Tox); Tcen = 1.9e-9 / T1; /* LFW_FD fix/add next 5 lines */ TL1 = dTcen_dVg = -Tcen * T2 / T1; dTcen_dVg = TL1 * (T5 * 4.0 * dVth_dVg + dVgsteff_dVg); dTcen_dVd = TL1 * (T5 * 4.0 * dVth_dVd + dVgsteff_dVd); dTcen_dVb = TL1 * (T5 * 4.0 * dVth_dVb + dVgsteff_dVb); dTcen_dVe = TL1 * (T5 * 4.0 * dVth_dVe + dVgsteff_dVe); if (selfheat) /*fix below expression Wagner */ /*dTcen_dT = -Tcen * T2 / T1 * (T5 * 4.0 * (dVth_dT - dVfbzb_dT) + dVgsteff_dT);*/ dTcen_dT = -Tcen * T2 / T1 * (T5 * 4.0 * (dVth_dT - dVfbzb_dT - dphi_dT) + dVgsteff_dT); else dTcen_dT = 0; Ccen = epssub / Tcen; T0 = Cox / (Cox + Ccen); Coxeff = T0 * Ccen; T1 = -Ccen / Tcen; /* LFW_FD fix/add next 5 lines */ TL1 = dCoxeff_dVg = T0 * T0 * T1; dCoxeff_dVg = TL1 * dTcen_dVg; dCoxeff_dVd = TL1 * dTcen_dVd; dCoxeff_dVb = TL1 * dTcen_dVb; dCoxeff_dVe = TL1 * dTcen_dVe; if (selfheat) /*dCoxeff_dT = T1 * dTcen_dT * (T0 - Coxeff / (Cox + Ccen));*/ dCoxeff_dT = TL1 * dTcen_dT; /* LFW_FD fix line */ else dCoxeff_dT = 0; CoxWLcen = CoxWL * Coxeff / Cox; CoxWLcenb = CoxWLb * Coxeff / Cox; /* 3 new lines Wagner*/ if (selfheat) dCoxWLcenb_dT = CoxWLb * dCoxeff_dT / Cox; else dCoxWLcenb_dT = 0; /* v4.1 */ CoxWLcen2 = 0.0; /* flexilint */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { /* T3 = 4.0 * (Vth + 1.12 - Vfbzb2 - phi); */ T3 = 4.0 * (Vth + eggbcp2 - Vfbzb2 - phi); /* bugfix v4.3.1 -Tanvir */ T2 = sqrt(T3*T3 + 0.0001); T5 = 0.5 * (1 + T3/T2); T4 = 0.5 * (T3 + T2); /* Tox += Tox; */ T0 = (Vgsteff2 + T4) / 
Tox; tmp = exp(0.7 * log(T0)); T1 = 1.0 + tmp; T2 = 0.7 * tmp / (T0 * Tox); Tcen2 = 1.9e-9 / T1; /* LFW_FD fix/add next 5 lines */ TL1 = dTcen2_dVg = -Tcen2 * T2 / T1; dTcen2_dVg = TL1 * (T5 * 4.0 * dVth_dVg + dVgsteff2_dVg); dTcen2_dVd = TL1 * (T5 * 4.0 * dVth_dVd + dVgsteff2_dVd); dTcen2_dVb = TL1 * (T5 * 4.0 * dVth_dVb + dVgsteff2_dVb); dTcen2_dVe = TL1 * (T5 * 4.0 * dVth_dVe + dVgsteff2_dVe); if (selfheat) /*fix below expression Wagner */ /*dTcen2_dT = -Tcen2 * T2 / T1 * (T5 * 4.0 * (dVth_dT - dVfbzb2_dT) + dVgsteff2_dT); */ dTcen2_dT = -Tcen2 * T2 / T1 * (T5 * 4.0 * (dVth_dT - dVfbzb2_dT - dphi_dT) + dVgsteff2_dT); else dTcen2_dT = 0; /*Ccen2 = EPSSI / Tcen2;*//*Bug Fix # 30 Jul09*/ Ccen2 = epssub/ Tcen2; T0 = Cox / (Cox + Ccen2); Coxeff2 = T0 * Ccen2; T1 = -Ccen2 / Tcen2; /* LFW_FD fix/add next 5 lines */ TL1 = dCoxeff2_dVg = T0 * T0 * T1; dCoxeff2_dVg = TL1 * dTcen2_dVg; dCoxeff2_dVd = TL1 * dTcen2_dVd; dCoxeff2_dVb = TL1 * dTcen2_dVb; dCoxeff2_dVe = TL1 * dTcen2_dVe; if (selfheat) dCoxeff2_dT = T1 * dTcen2_dT * (T0 - Coxeff2 / (Cox + Ccen2)); else dCoxeff2_dT = 0; CoxWLcen2 = CoxWL2 * Coxeff2 / Cox; CoxWLcenb2 = CoxWLb2 * Coxeff2 / Cox; /* 3 new lines Wagner */ if (selfheat) dCoxWLcenb2_dT = CoxWLb2 * dCoxeff2_dT / Cox; else dCoxWLcenb2_dT = 0; } /* end v4.1 */ AbulkCV = Abulk0 * pParam->B4SOIabulkCVfactor; /* LFW_FD fix/add next 4 lines */ dAbulkCV_dVg = pParam->B4SOIabulkCVfactor * dAbulk0_dVg; dAbulkCV_dVb = pParam->B4SOIabulkCVfactor * dAbulk0_dVb; dAbulkCV_dVd = pParam->B4SOIabulkCVfactor * dAbulk0_dVd; dAbulkCV_dVe = pParam->B4SOIabulkCVfactor * dAbulk0_dVe; /* 3 new lines Wagner */ if (selfheat) dAbulkCV_dT = dAbulk0_dT * pParam->B4SOIabulkCVfactor; else dAbulkCV_dT = 0; VdsatCV = (Vgsteff - DeltaPhi) / AbulkCV; /* LFW_FD add next 4 lines */ dVdsatCV_dVg = (dVgsteff_dVg - dDeltaPhi_dVg - VdsatCV * dAbulkCV_dVg) / AbulkCV; dVdsatCV_dVd = (dVgsteff_dVd - dDeltaPhi_dVd - VdsatCV * dAbulkCV_dVd) / AbulkCV; dVdsatCV_dVb = (dVgsteff_dVb - dDeltaPhi_dVb - 
VdsatCV * dAbulkCV_dVb) / AbulkCV; dVdsatCV_dVe = (dVgsteff_dVe - dDeltaPhi_dVe - VdsatCV * dAbulkCV_dVe) / AbulkCV; V4 = VdsatCV - Vds - DELTA_4; T0 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV); VdseffCV = VdsatCV - 0.5 * (V4 + T0); T1 = 0.5 * (1.0 + V4 / T0); T2 = DELTA_4 / T0; T3 = (1.0 - T1 - T2) / AbulkCV; T4 = T3 * ( 1.0 - dDeltaPhi_dVg); /* LFW_FD fix/add next 4 lines */ dVdseffCV_dVg = (1.0 - T1 - T2) * dVdsatCV_dVg; dVdseffCV_dVd = (1.0 - T1 - T2) * dVdsatCV_dVd + T1; dVdseffCV_dVb = (1.0 - T1 - T2) * dVdsatCV_dVb; dVdseffCV_dVe = (1.0 - T1 - T2) * dVdsatCV_dVe; /* 10 new lines Wagner */ if (selfheat) { dVdsatCV_dT = (dVgsteff_dT-dDeltaPhi_dT)/AbulkCV -VdsatCV*dAbulkCV_dT/AbulkCV; dTL1_dT = (V4 + 2.0 * DELTA_4) * dVdsatCV_dT / T0; dVdseffCV_dT = 0.5*dVdsatCV_dT - 0.5*dTL1_dT; } else { dVdsatCV_dT = 0; dVdseffCV_dT = 0; } T0 = AbulkCV * VdseffCV; T1 = Vgsteff - DeltaPhi; T2 = 12.0 * (T1 - 0.5 * T0 + 1.0e-20); T3 = T0 / T2; T4 = 1.0 - 12.0 * T3 * T3; T5 = AbulkCV * (6.0 * T0 * (4.0 * T1 - T0) / (T2 * T2) - 0.5); T6 = T5 * VdseffCV / AbulkCV; /* LFW_FD add next 16 lines */ dT0_dVg = dAbulkCV_dVg * VdseffCV + AbulkCV * dVdseffCV_dVg; dT0_dVd = dAbulkCV_dVd * VdseffCV + AbulkCV * dVdseffCV_dVd; dT0_dVb = dAbulkCV_dVb * VdseffCV + AbulkCV * dVdseffCV_dVb; dT0_dVe = dAbulkCV_dVe * VdseffCV + AbulkCV * dVdseffCV_dVe; dT1_dVg = dVgsteff_dVg - dDeltaPhi_dVg; dT1_dVd = dVgsteff_dVd - dDeltaPhi_dVd; dT1_dVb = dVgsteff_dVb - dDeltaPhi_dVb; dT1_dVe = dVgsteff_dVe - dDeltaPhi_dVe; dT2_dVg = 12.0 * (dT1_dVg - 0.5 * dT0_dVg); dT2_dVd = 12.0 * (dT1_dVd - 0.5 * dT0_dVd); dT2_dVb = 12.0 * (dT1_dVb - 0.5 * dT0_dVb); dT2_dVe = 12.0 * (dT1_dVe - 0.5 * dT0_dVe); dT3_dVg = (dT0_dVg - T3 * dT2_dVg) / T2; dT3_dVd = (dT0_dVd - T3 * dT2_dVd) / T2; dT3_dVb = (dT0_dVb - T3 * dT2_dVb) / T2; dT3_dVe = (dT0_dVe - T3 * dT2_dVe) / T2; qgate1 = qinv = qgate = qinoi = CoxWLcen * (T1 - T0 * (0.5 - T3)); /* enhanced line Wagner */ QovCox = qgate / Coxeff; /* LFW_FD fix/add next 4 lines */ Cgg1 = 
CoxWLcen * (dT1_dVg - dT0_dVg * (0.5 - T3) + T0 * dT3_dVg) + QovCox * dCoxeff_dVg; Cgd1 = CoxWLcen * (dT1_dVd - dT0_dVd * (0.5 - T3) + T0 * dT3_dVd) + QovCox * dCoxeff_dVd; Cgb1 = CoxWLcen * (dT1_dVb - dT0_dVb * (0.5 - T3) + T0 * dT3_dVb) + QovCox * dCoxeff_dVb; Cge1 = CoxWLcen * (dT1_dVe - dT0_dVe * (0.5 - T3) + T0 * dT3_dVe) + QovCox * dCoxeff_dVe; /* 10 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = 12 * (dVgsteff_dT - dDeltaPhi_dT - 0.5*dTL1_dT); dTL3_dT = dTL1_dT/T2 - (T3/T2)*dTL2_dT; dqgate_dT = (qgate * dCoxeff_dT / Coxeff) + CoxWLcen * (dVgsteff_dT - dDeltaPhi_dT - dTL1_dT*(0.5-T3) + T0*dTL3_dT); } else dqgate_dT = 0; /* LFW_FD 2 new lines per flexilint */ T02 = T12 = T22 = T52 = 0.0; /* flexilint */ Cgg12 = Cgd12 = Cgb12 = Cge12 = 0.0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { VdsatCV2 = (Vgsteff2 - DeltaPhi2) / AbulkCV; /* LFW_FD add next 4 lines */ dVdsatCV2_dVg = (dVgsteff2_dVg - dDeltaPhi2_dVg - VdsatCV2 * dAbulkCV_dVg) / AbulkCV; dVdsatCV2_dVd = (dVgsteff2_dVd - dDeltaPhi2_dVd - VdsatCV2 * dAbulkCV_dVd) / AbulkCV; dVdsatCV2_dVb = (dVgsteff2_dVb - dDeltaPhi2_dVb - VdsatCV2 * dAbulkCV_dVb) / AbulkCV; dVdsatCV2_dVe = (dVgsteff2_dVe - dDeltaPhi2_dVe - VdsatCV2 * dAbulkCV_dVe) / AbulkCV; V4 = VdsatCV2 - Vds - DELTA_4; T02 = sqrt(V4 * V4 + 4.0 * DELTA_4 * VdsatCV2); VdseffCV2 = VdsatCV2 - 0.5 * (V4 + T02); T12 = 0.5 * (1.0 + V4 / T02); T22 = DELTA_4 / T02; T3 = (1.0 - T12 - T22) / AbulkCV; T4 = T3 * ( 1.0 - dDeltaPhi2_dVg); /* LFW_FD fix/add next 4 lines */ dVdseffCV2_dVg = (1.0 - T12 - T22) * dVdsatCV2_dVg; dVdseffCV2_dVd = (1.0 - T12 - T22) * dVdsatCV2_dVd + T12; dVdseffCV2_dVb = (1.0 - T12 - T22) * dVdsatCV2_dVb; dVdseffCV2_dVe = (1.0 - T12 - T22) * dVdsatCV2_dVe; /* 10 new lines Wagner */ if (selfheat) { dVdsatCV2_dT = (dVgsteff2_dT-dDeltaPhi2_dT)/AbulkCV 
-VdsatCV2*dAbulkCV_dT/AbulkCV; dTL1_dT = (V4 + 2.0 * DELTA_4) * dVdsatCV2_dT / T02; dVdseffCV2_dT = 0.5*dVdsatCV2_dT - 0.5*dTL1_dT; } else { dVdsatCV2_dT = 0; dVdseffCV2_dT = 0; } T02 = AbulkCV * VdseffCV2; T12 = Vgsteff2 - DeltaPhi2; T22 = 12.0 * (T12 - 0.5 * T02 + 1.0e-20); T3 = T02 / T22; T4 = 1.0 - 12.0 * T3 * T3; T52 = AbulkCV * (6.0 * T02 * (4.0 * T12 - T02) / (T22 * T22) - 0.5); T6 = T52 * VdseffCV2 / AbulkCV; /* LFW_FD add next 16 lines */ dT02_dVg = dAbulkCV_dVg * VdseffCV2 + AbulkCV * dVdseffCV2_dVg; dT02_dVd = dAbulkCV_dVd * VdseffCV2 + AbulkCV * dVdseffCV2_dVd; dT02_dVb = dAbulkCV_dVb * VdseffCV2 + AbulkCV * dVdseffCV2_dVb; dT02_dVe = dAbulkCV_dVe * VdseffCV2 + AbulkCV * dVdseffCV2_dVe; dT12_dVg = dVgsteff2_dVg - dDeltaPhi2_dVg; dT12_dVd = dVgsteff2_dVd - dDeltaPhi2_dVd; dT12_dVb = dVgsteff2_dVb - dDeltaPhi2_dVb; dT12_dVe = dVgsteff2_dVe - dDeltaPhi2_dVe; dT22_dVg = 12.0 * (dT12_dVg - 0.5 * dT02_dVg); dT22_dVd = 12.0 * (dT12_dVd - 0.5 * dT02_dVd); dT22_dVb = 12.0 * (dT12_dVb - 0.5 * dT02_dVb); dT22_dVe = 12.0 * (dT12_dVe - 0.5 * dT02_dVe); dT3_dVg = (dT02_dVg - T3 * dT22_dVg) / T22; dT3_dVd = (dT02_dVd - T3 * dT22_dVd) / T22; dT3_dVb = (dT02_dVb - T3 * dT22_dVb) / T22; dT3_dVe = (dT02_dVe - T3 * dT22_dVe) / T22; T7 = CoxWLcen2 * (T12 - T02 * (0.5 - T3)); qinv += T7; qgate = qinoi = qinv; QovCox2 = T7 / Coxeff2; /* LFW_FD fix/add next 4 lines */ Cgg12 = CoxWLcen2 * (dT12_dVg - dT02_dVg * (0.5 - T3) + T02 * dT3_dVg) + QovCox2 * dCoxeff2_dVg; Cgd12 = CoxWLcen2 * (dT12_dVd - dT02_dVd * (0.5 - T3) + T02 * dT3_dVd) + QovCox2 * dCoxeff2_dVd; Cgb12 = CoxWLcen2 * (dT12_dVb - dT02_dVb * (0.5 - T3) + T02 * dT3_dVb) + QovCox2 * dCoxeff2_dVb; Cge12 = CoxWLcen2 * (dT12_dVe - dT02_dVe * (0.5 - T3) + T02 * dT3_dVe) + QovCox2 * dCoxeff2_dVe; /* 11 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = 12 * (dVgsteff2_dT - dDeltaPhi2_dT - 0.5*dTL1_dT); dTL3_dT = dTL1_dT/T22 - (T3/T22)*dTL2_dT; dqgate2_dT = (T7 * 
dCoxeff2_dT / Coxeff2) + CoxWLcen2 * (dVgsteff2_dT - dDeltaPhi2_dT - dTL1_dT*(0.5-T3) + T02*dTL3_dT); dqgate_dT += dqgate2_dT; } else dqgate_dT += 0; } /* end v4.1 */ /* v3.1 */ /* LFW_FD 2 new lines - flexilint */ Csg2 = Cbg12 = Cbd12 = Cbb12 = Cbe12 = 0; dqbulk_dT = 0; if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { qbulk = Cbg1 = Cbd1 = Cbb1 = Cbe1 = dqbulk_dT = 0; /* LFW_FD enhance line */ } else /* soiMod = 0 or 1 */ { T7 = 1.0 - AbulkCV; T8 = T2 * T2; T9 = 12.0 * T7 * T0 * T0 / (T8 * AbulkCV); T10 = T9 * (1.0 - dDeltaPhi_dVg); T11 = -T7 * T5 / AbulkCV; T12 = -(T9 * T1 / AbulkCV + VdseffCV * (0.5 - T0 / T2)); qbulk1 = qbulk = CoxWLcenb * T7 * (0.5 * VdseffCV - T0 * VdseffCV / T2); /* enhanced line Wagner */ QovCox = qbulk / Coxeff; /* LFW_FD fix/add next 4 derivatives */ Cbg1 = CoxWLcenb * T7 * (0.5 - T0 / T2) * dVdseffCV_dVg - CoxWLcenb * T7 * VdseffCV * ((dT0_dVg -T0 * dT2_dVg / T2) /T2) - CoxWLcenb * VdseffCV * (0.5 - T0 / T2) * dAbulkCV_dVg + QovCox * dCoxeff_dVg; Cbb1 = CoxWLcenb * T7 * (0.5 - T0 / T2) * dVdseffCV_dVb - CoxWLcenb * T7 * VdseffCV * ((dT0_dVb -T0 * dT2_dVb / T2) /T2) - CoxWLcenb * VdseffCV * (0.5 - T0 / T2) * dAbulkCV_dVb + QovCox * dCoxeff_dVb; Cbd1 = CoxWLcenb * T7 * (0.5 - T0 / T2) * dVdseffCV_dVd - CoxWLcenb * T7 * VdseffCV * ((dT0_dVd -T0 * dT2_dVd / T2) /T2) - CoxWLcenb * VdseffCV * (0.5 - T0 / T2) * dAbulkCV_dVd + QovCox * dCoxeff_dVd; Cbe1 = CoxWLcenb * T7 * (0.5 - T0 / T2) * dVdseffCV_dVe - CoxWLcenb * T7 * VdseffCV * ((dT0_dVe -T0 * dT2_dVe / T2) /T2) - CoxWLcenb * VdseffCV * (0.5 - T0 / T2) * dAbulkCV_dVe + QovCox * dCoxeff_dVe; /* 12 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL2_dT = 12 * (dVgsteff_dT - dDeltaPhi_dT - 0.5*dTL1_dT); TL3 = T0/T2; dTL3_dT = dTL1_dT/T2 - (TL3/T2)*dTL2_dT; TL4 = (0.5 * VdseffCV - T0 * VdseffCV / T2); dTL4_dT = (0.5 - T0/T2)*dVdseffCV_dT - VdseffCV*dTL3_dT; dqbulk_dT = dCoxWLcenb_dT * T7 * TL4 - CoxWLcenb * dAbulkCV_dT * TL4 + CoxWLcenb * 
T7 * dTL4_dT; } else dqbulk_dT = 0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T8 = T22 * T22; T9 = 12.0 * T7 * T02 * T02 / (T8 * AbulkCV); T10 = T9 * (1.0 - dDeltaPhi2_dVg); T11 = -T7 * T52 / AbulkCV; T12 = -(T9 * (Vgsteff2 - DeltaPhi2) / AbulkCV + VdseffCV2 * (0.5 - T02 / T22)); qbulk2 = CoxWLcenb2 * T7 * (0.5 * VdseffCV2 - T02 * VdseffCV2 / T22); QovCox2 = qbulk2 / Coxeff2; /* LFW_FD fix/add next 4 derivatives */ Cbg12 = CoxWLcenb2 * T7 * (0.5 - T02 / T22) * dVdseffCV2_dVg - CoxWLcenb2 * T7 * VdseffCV2 * ((dT02_dVg -T02 * dT22_dVg / T22) /T22) - CoxWLcenb2 * VdseffCV2 * (0.5 - T02 / T22) * dAbulkCV_dVg + QovCox2 * dCoxeff2_dVg; Cbb12 = CoxWLcenb2 * T7 * (0.5 - T02 / T22) * dVdseffCV2_dVb - CoxWLcenb2 * T7 * VdseffCV2 * ((dT02_dVb -T02 * dT22_dVb / T22) /T22) - CoxWLcenb2 * VdseffCV2 * (0.5 - T02 / T22) * dAbulkCV_dVb + QovCox2 * dCoxeff2_dVb; Cbd12 = CoxWLcenb2 * T7 * (0.5 - T02 / T22) * dVdseffCV2_dVd - CoxWLcenb2 * T7 * VdseffCV2 * ((dT02_dVd -T02 * dT22_dVd / T22) /T22) - CoxWLcenb2 * VdseffCV2 * (0.5 - T02 / T22) * dAbulkCV_dVd + QovCox2 * dCoxeff2_dVd; Cbe12 = CoxWLcenb2 * T7 * (0.5 - T02 / T22) * dVdseffCV2_dVe - CoxWLcenb2 * T7 * VdseffCV2 * ((dT02_dVe -T02 * dT22_dVe / T22) /T22) - CoxWLcenb2 * VdseffCV2 * (0.5 - T02 / T22) * dAbulkCV_dVe + QovCox2 * dCoxeff2_dVe; /* 12 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL2_dT = 12 * (dVgsteff2_dT - dDeltaPhi2_dT - 0.5*dTL1_dT); TL3 = T02/T22; dTL3_dT = dTL1_dT/T22 - (TL3/T22)*dTL2_dT; TL4 = (0.5 * VdseffCV2 - T02 * VdseffCV2 / T22); dTL4_dT = (0.5 - T02/T22)*dVdseffCV2_dT - VdseffCV2*dTL3_dT; dqbulk2_dT = dCoxWLcenb2_dT * T7 * TL4 - CoxWLcenb2 * dAbulkCV_dT * TL4 + CoxWLcenb2 * T7 * dTL4_dT; } else dqbulk2_dT = 0; qbulk += qbulk2; dqbulk_dT += dqbulk2_dT; /* new line Wagner */ } /* end v4.1 */ } /* v3.1 */ Csg2 = Csd2 = Csb2 = Cse2 
= 0.0; /* LFW_FD enhance line */ dqsrc2_dT = 0; /* new line Wagner */ if (model->B4SOIxpart > 0.5) { /* 0/100 partition */ qsrc = -CoxWLcen * (T1 / 2.0 + T0 / 4.0 - 0.5 * T0 * T0 / T2); /* 9 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL5_dT = dVgsteff_dT - dDeltaPhi_dT; dTL2_dT = 12 * (dVgsteff_dT - dDeltaPhi_dT - 0.5*dTL1_dT); dqsrc_dT = qsrc*dCoxeff_dT/Coxeff -CoxWLcen*(dTL5_dT/2.0 + dTL1_dT/4.0 - T0*dTL1_dT/T2 + 0.5*T0*T0*dTL2_dT/(T2*T2) ); } else dqsrc_dT = 0; QovCox = qsrc / Coxeff; T2 += T2; T3 = T2 * T2; T7 = -(0.25 - 12.0 * T0 * (4.0 * T1 - T0) / T3); T4 = -(0.5 + 24.0 * T0 * T0 / T3) * (1.0 - dDeltaPhi_dVg); T5 = T7 * AbulkCV; T6 = T7 * VdseffCV; /* LFW_FD fix/add next 4 derivatives */ Csg = QovCox * dCoxeff_dVg - CoxWLcen * (dT1_dVg / 2.0 + dT0_dVg / 4.0 - 2.0 * T0 * dT0_dVg / T2 + 2.0 * T0 * T0 * dT2_dVg / (T2 * T2)); Csd = QovCox * dCoxeff_dVd - CoxWLcen * (dT1_dVd / 2.0 + dT0_dVd / 4.0 - 2.0 * T0 * dT0_dVd / T2 + 2.0 * T0 * T0 * dT2_dVd / (T2 * T2)); Csb = QovCox * dCoxeff_dVb - CoxWLcen * (dT1_dVb / 2.0 + dT0_dVb / 4.0 - 2.0 * T0 * dT0_dVb / T2 + 2.0 * T0 * T0 * dT2_dVb / (T2 * T2)); Cse = QovCox * dCoxeff_dVe - CoxWLcen * (dT1_dVe / 2.0 + dT0_dVe / 4.0 - 2.0 * T0 * dT0_dVe / T2 + 2.0 * T0 * T0 * dT2_dVe / (T2 * T2)); /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T12 = Vgsteff2 - DeltaPhi2; /* must restore for derivatives below*/ qsrc2 = -CoxWLcen2 * ( (Vgsteff2 - DeltaPhi2) / 2.0 + T02 / 4.0 - 0.5 * T02 * T02 / T22); /* CJB LFW */ /* 9 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL5_dT = dVgsteff2_dT - dDeltaPhi2_dT; dTL2_dT = 12 * (dVgsteff2_dT - dDeltaPhi2_dT - 0.5*dTL1_dT); dqsrc2_dT = qsrc2*dCoxeff2_dT/Coxeff2 -CoxWLcen2*(dTL5_dT/2.0 + dTL1_dT/4.0 - T02*dTL1_dT/T22 + 0.5*T02*T02*dTL2_dT/(T22*T22) ); } else dqsrc2_dT = 
0; QovCox2 = qsrc2 / Coxeff2; T22 += T22; T3 = T22 * T22; T7 = -(0.25 - 12.0 * T02 * (4.0 * T12 - T02) / T3); T4 = -(0.5 + 24.0 * T02 * T02 / T3) * (1.0 - dDeltaPhi2_dVg); T5 = T7 * AbulkCV; T6 = T7 * VdseffCV2; /* LFW_FD fix/add next 4 derivatives */ Csg2 = QovCox2 * dCoxeff2_dVg - CoxWLcen2 * (dT12_dVg / 2.0 + dT02_dVg / 4.0 - 2.0 * T02 * dT02_dVg / T22 + 2.0 * T02 * T02 * dT22_dVg / (T22 * T22)); Csd2 = QovCox2 * dCoxeff2_dVd - CoxWLcen2 * (dT12_dVd / 2.0 + dT02_dVd / 4.0 - 2.0 * T02 * dT02_dVd / T22 + 2.0 * T02 * T02 * dT22_dVd / (T22 * T22)); Csb2 = QovCox2 * dCoxeff2_dVb - CoxWLcen2 * (dT12_dVb / 2.0 + dT02_dVb / 4.0 - 2.0 * T02 * dT02_dVb / T22 + 2.0 * T02 * T02 * dT22_dVb / (T22 * T22)); Cse2 = QovCox2 * dCoxeff2_dVe - CoxWLcen2 * (dT12_dVe / 2.0 + dT02_dVe / 4.0 - 2.0 * T02 * dT02_dVe / T22 + 2.0 * T02 * T02 * dT22_dVe / (T22 * T22)); qsrc += qsrc2; dqsrc_dT += dqsrc2_dT; /* new line Wagner */ } /* end v4.1 */ } else if (model->B4SOIxpart < 0.5) { /* 40/60 partition */ T2 = T2 / 12.0; T3 = 0.5 * CoxWLcen / (T2 * T2); T4 = T1 * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T3 * T4; QovCox = qsrc / Coxeff; T8 = 4.0 / 3.0 * T1 * (T1 - T0) + 0.4 * T0 * T0; T5 = -2.0 * qsrc / T2 - T3 * (T1 * (3.0 * T1 - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); T6 = AbulkCV * (qsrc / T2 + T3 * T8); T7 = T6 * VdseffCV / AbulkCV; /* LFW_FD add next 32 lines */ dT2_dVg = dT2_dVg / 12.0; dT2_dVd = dT2_dVd / 12.0; dT2_dVb = dT2_dVb / 12.0; dT2_dVe = dT2_dVe / 12.0; dT3_dVg = T3 * dCoxeff_dVg / Coxeff - 2.0 * T3 * T2 * dT2_dVg / (T2 * T2); dT3_dVd = T3 * dCoxeff_dVd / Coxeff - 2.0 * T3 * T2 * dT2_dVd / (T2 * T2); dT3_dVb = T3 * dCoxeff_dVb / Coxeff - 2.0 * T3 * T2 * dT2_dVb / (T2 * T2); dT3_dVe = T3 * dCoxeff_dVe / Coxeff - 2.0 * T3 * T2 * dT2_dVe / (T2 * T2); dT4_dVg = dT1_dVg * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) + T1 * (4.0 * T0 * dT0_dVg / 3.0 + dT1_dVg * (T1 - 4.0 * T0 / 3.0) + T1 * (dT1_dVg - 4.0 * dT0_dVg /3.0)) - 2.0 * T0 
* T0 * dT0_dVg / 5.0; dT4_dVd = dT1_dVd * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) + T1 * (4.0 * T0 * dT0_dVd / 3.0 + dT1_dVd * (T1 - 4.0 * T0 / 3.0) + T1 * (dT1_dVd - 4.0 * dT0_dVd /3.0)) - 2.0 * T0 * T0 * dT0_dVd / 5.0; dT4_dVb = dT1_dVb * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) + T1 * (4.0 * T0 * dT0_dVb / 3.0 + dT1_dVb * (T1 - 4.0 * T0 / 3.0) + T1 * (dT1_dVb - 4.0 * dT0_dVb /3.0)) - 2.0 * T0 * T0 * dT0_dVb / 5.0; dT4_dVe = dT1_dVe * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) + T1 * (4.0 * T0 * dT0_dVe / 3.0 + dT1_dVe * (T1 - 4.0 * T0 / 3.0) + T1 * (dT1_dVe - 4.0 * dT0_dVe /3.0)) - 2.0 * T0 * T0 * dT0_dVe / 5.0; /* LFW_FD fix/add next 4 derivatives */ Csg = -(dT3_dVg * T4 + T3 * dT4_dVg); Csd = -(dT3_dVd * T4 + T3 * dT4_dVd); Csb = -(dT3_dVb * T4 + T3 * dT4_dVb); Cse = -(dT3_dVe * T4 + T3 * dT4_dVe); /* 13 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV_dT + dAbulkCV_dT * VdseffCV; dTL5_dT = dVgsteff_dT - dDeltaPhi_dT; dTL2_dT = (dVgsteff_dT - dDeltaPhi_dT - 0.5*dTL1_dT); dTL3_dT = - 2*T3*dTL2_dT/T2 + T3*dCoxeff_dT/Coxeff; dTL4_dT = dTL5_dT * (2.0*T0*T0/3.0 + T1*(T1-4.0*T0/3.0)) + T1 * (4.0*T0*dTL1_dT/3.0 + dTL5_dT*(T1-4.0*T0/3.0) + T1*(dTL5_dT-4.0*dTL1_dT/3.0) ) - 2.0*T0*T0*dTL1_dT/5.0; dqsrc_dT = -T3*dTL4_dT - dTL3_dT*T4; } else dqsrc_dT += 0; /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { T12 = Vgsteff2 - DeltaPhi2; /* must restore for derivatives below*/ T22 = T22 / 12.0; T3 = 0.5 * CoxWLcen2 / (T22 * T22); T4 = T12 * (2.0 * T02 * T02 / 3.0 + T12 * (T12 - 4.0 * T02 / 3.0)) - 2.0 * T02 * T02 * T02 / 15.0; qsrc2 = -T3 * T4; QovCox2 = qsrc2 / Coxeff2; T8 = 4.0 / 3.0 * T12 * (T12 - T02) + 0.4 * T02 * T02; T5 = -2.0 * qsrc2 / T22 - T3 * (T12 * (3.0 * T12 - 8.0 * T02 / 3.0) + 2.0 * T02 * T02 / 3.0); T6 = AbulkCV * (qsrc2 / T22 + T3 * T8); T7 = T6 * VdseffCV2 / AbulkCV; /* LFW_FD add next 32 lines 
*/ dT22_dVg = dT22_dVg / 12.0; dT22_dVd = dT22_dVd / 12.0; dT22_dVb = dT22_dVb / 12.0; dT22_dVe = dT22_dVe / 12.0; dT3_dVg = T3 * dCoxeff2_dVg / Coxeff2 - 2.0 * T3 * T22 * dT22_dVg / (T22 * T22); dT3_dVd = T3 * dCoxeff2_dVd / Coxeff2 - 2.0 * T3 * T22 * dT22_dVd / (T22 * T22); dT3_dVb = T3 * dCoxeff2_dVb / Coxeff2 - 2.0 * T3 * T22 * dT22_dVb / (T22 * T22); dT3_dVe = T3 * dCoxeff2_dVe / Coxeff2 - 2.0 * T3 * T22 * dT22_dVe / (T22 * T22); dT4_dVg = dT12_dVg * (2.0 * T02 * T02 / 3.0 + T12 * (T12 - 4.0 * T02 / 3.0)) + T12 * (4.0 * T02 * dT02_dVg / 3.0 + dT12_dVg * (T12 - 4.0 * T02 / 3.0) + T12 * (dT12_dVg - 4.0 * dT02_dVg /3.0)) - 2.0 * T02 * T02 * dT02_dVg / 5.0; dT4_dVd = dT12_dVd * (2.0 * T02 * T02 / 3.0 + T12 * (T12 - 4.0 * T02 / 3.0)) + T12 * (4.0 * T02 * dT02_dVd / 3.0 + dT12_dVd * (T12 - 4.0 * T02 / 3.0) + T12 * (dT12_dVd - 4.0 * dT02_dVd /3.0)) - 2.0 * T02 * T02 * dT02_dVd / 5.0; dT4_dVb = dT12_dVb * (2.0 * T02 * T02 / 3.0 + T12 * (T12 - 4.0 * T02 / 3.0)) + T12 * (4.0 * T02 * dT02_dVb / 3.0 + dT12_dVb * (T12 - 4.0 * T02 / 3.0) + T12 * (dT12_dVb - 4.0 * dT02_dVb /3.0)) - 2.0 * T02 * T02 * dT02_dVb / 5.0; dT4_dVe = dT12_dVe * (2.0 * T02 * T02 / 3.0 + T12 * (T12 - 4.0 * T02 / 3.0)) + T12 * (4.0 * T02 * dT02_dVe / 3.0 + dT12_dVe * (T12 - 4.0 * T02 / 3.0) + T12 * (dT12_dVe - 4.0 * dT02_dVe /3.0)) - 2.0 * T02 * T02 * dT02_dVe / 5.0; /* LFW_FD fix/add next 4 derivatives */ Csg2 = -(dT3_dVg * T4 + T3 * dT4_dVg); Csd2 = -(dT3_dVd * T4 + T3 * dT4_dVd); Csb2 = -(dT3_dVb * T4 + T3 * dT4_dVb); Cse2 = -(dT3_dVe * T4 + T3 * dT4_dVe); /* 14 new lines Wagner */ if (selfheat) { dTL1_dT = AbulkCV * dVdseffCV2_dT + dAbulkCV_dT * VdseffCV2; dTL5_dT = dVgsteff2_dT - dDeltaPhi2_dT; dTL2_dT = (dVgsteff2_dT - dDeltaPhi2_dT - 0.5*dTL1_dT); dTL3_dT = - 2*T3*dTL2_dT/T22 + T3*dCoxeff2_dT/Coxeff2; dTL4_dT = dTL5_dT * (2.0*T02*T02/3.0 + T12*(T12-4.0*T02/3.0)) + T12 * (4.0*T02*dTL1_dT/3.0 + dTL5_dT*(T12-4.0*T02/3.0) + T12*(dTL5_dT-4.0*dTL1_dT/3.0) ) - 2.0*T02*T02*dTL1_dT/5.0; dqsrc2_dT = 
-T3*dTL4_dT - dTL3_dT*T4; } else dqsrc_dT += 0; qsrc += qsrc2; dqsrc_dT += dqsrc2_dT; /* new line Wagner */ } /* end v4.1 */ } else { /* 50/50 partition */ qsrc = -0.5 * qgate; Csg = -0.5 * Cgg1; Csd = -0.5 * Cgd1; Csb = -0.5 * Cgb1; Cse = -0.5 * Cge1; /* LFW_FD new line */ /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { /* LFW_FD fix/add next 4 lines */ Csg2 = -0.5 * Cgg12; Csd2 = -0.5 * Cgd12; Csb2 = -0.5 * Cgb12; Cse2 = -0.5 * Cge12; } dqsrc_dT = -0.5 * dqgate_dT; /* new line Wagner */ /* end v4.1 */ } /* Backgate charge */ /* v3.1 */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { Qe1 = Ce1b = Ce1e = Ce1T = dQe1_dT = 0; } else /* soiMod = 0 or 1 */ { CboxWL = pParam->B4SOIkb1 * model->B4SOIfbody * Cbox * (pParam->B4SOIweffCV / here->B4SOInseg * here->B4SOInf /* bugfix_snps nf*/ * pParam->B4SOIleffCVbg + here->B4SOIaebcp); Qe1 = CboxWL * (Vesfb - Vbs); Ce1b = dQe1_dVb = -CboxWL; Ce1e = dQe1_dVe = CboxWL; if (selfheat) Ce1T = dQe1_dT = -CboxWL * dvfbb_dT; else Ce1T = dQe1_dT = 0.0; } /* v3.1 */ qgate += Qac0 + Qsub0 - qbulk; qbody = qbulk - Qac0 - Qsub0 - Qe1; qsub = Qe1; qdrn = -(qgate + qbody + qsub + qsrc); /* 8 new lines Wagner */ dqgate_dT += dQac0_dT + dQsub0_dT - dqbulk_dT; dqbody_dT = dqbulk_dT - dQac0_dT - dQsub0_dT - dQe1_dT; dqsub_dT = dQe1_dT; dqdrn_dT = -(dqgate_dT + dqbody_dT + dqsub_dT + dqsrc_dT); CgT = dqgate_dT; CbT = dqbody_dT; CsT = dqsrc_dT; CdT = dqdrn_dT; Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg; /* LFW_FD fix/add next 3 lines */ Cbd = Cbd1 - dQac0_dVd - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb - Ce1b; Cbe = Cbe1 - dQac0_dVe - dQsub0_dVe - Ce1e; Cgg = Cgg1 - Cbg; Cgd = Cgd1 - Cbd; /* LFW_FD fix/add next 2 lines */ Cgb = Cgb1 - Cbb - Ce1b; Cge = Cge1 - Cbe - Ce1e; /* comment out next 4 lines Wagner */ /*if (selfheat) CgT = Cgg1 * dVgsteff_dT + dQac0_dT + dQsub0_dT; else CgT = 0.0;*/ /*Cgb *= dVbseff_dVb; */ 
/*Cbb *= dVbseff_dVb; */ /*Csb *= dVbseff_dVb; */ /* comment out next 2 lines Wagner */ /*if (selfheat) CsT = Csg * dVgsteff_dT; else CsT = 0.0;*/ /* v4.1 */ if ((here->B4SOIsoiMod != 2) && /* Bug fix #10 Jun 09 'opposite type Q/C evaluated only if bodymod=1' */ (here->B4SOIbodyMod != 0) && here->B4SOIagbcp2 > 0) { /* LFW_FD fix next 12 lines */ Cbg += Cbg12 - dQac02_dVg - dQsub02_dVg; Cbd += Cbd12 - dQac02_dVd - dQsub02_dVd; Cbb += Cbb12 - dQac02_dVb - dQsub02_dVb; Cbe += Cbe12 - dQac02_dVe - dQsub02_dVe; Cgg = Cgg1 + Cgg12 - Cbg; Cgd = Cgd1 + Cgd12 - Cbd; Cgb = Cgb1 + Cgb12 - Cbb - Ce1b; Cge = Cge1 + Cge12 - Cbe - Ce1e; Csg += Csg2; Csd += Csd2; Csb += Csb2; Cse += Cse2; } /* end v4.1 */ here->B4SOIcggb = Cgg; here->B4SOIcgsb = -(Cgg + Cgd + Cgb + Cge); /* LFW_FD fix line */ here->B4SOIcgdb = Cgd; here->B4SOIcgeb = Cge; /* LFW_FD fix line */ here->B4SOIcgT = CgT; here->B4SOIcbgb = Cbg; here->B4SOIcbsb = -(Cbg + Cbd + Cbb + Cbe); /* LFW_FD fix line */ here->B4SOIcbdb = Cbd; here->B4SOIcbeb = Cbe; /* LFW_FD fix line */ here->B4SOIcbT = CbT; here->B4SOIceT = Ce1T; here->B4SOIceeb = Ce1e ; here->B4SOIcdgb = -(Cgg + Cbg + Csg); here->B4SOIcddb = -(Cgd + Cbd + Csd); here->B4SOIcdeb = -(Cge + Cse + Cbe) - Ce1e; /* LFW_FD fix line */ here->B4SOIcdT = -(CgT+CbT+CsT) - Ce1T; here->B4SOIcdsb = Cgg + Cgd + Cgb + Cge /* LFW_FD fix expression */ + Cbg + Cbd + Cbb + Cbe + Ce1e + Csg + Csd + Csb + Cse + Ce1b; here->B4SOIqinv = -qinoi; } /* End of if capMod ==3 */ else { /* v4.0 */ Qsub0 = Qac0 = 0.0; qgate = qdrn = qsrc = qbody = qsub = 0.0; Cbg = Cbd = Cbb = 0.0; here->B4SOIcggb = here->B4SOIcgsb = here->B4SOIcgdb = 0.0; here->B4SOIcdgb = here->B4SOIcdsb = here->B4SOIcddb = 0.0; here->B4SOIcbgb = here->B4SOIcbsb = here->B4SOIcbdb = 0.0; } } here->B4SOIqgate = qgate; here->B4SOIqdrn = qdrn; here->B4SOIqbulk = qbody; here->B4SOIqsrc = qsrc; finished: /* returning Values to Calling Routine */ /* * COMPUTE EQUIVALENT DRAIN CURRENT SOURCE */ /* flexilint inits */ gcjdbs = gcjdT = 
0.0; gcjsbs = gcjsT = 0.0; if (ChargeComputationNeeded) { /* Intrinsic S/D junction charge */ /* v3.1 */ if (here->B4SOIsoiMod == 2) /* v3.2 */ /* ideal FD */ { qjs = qjd = 0.0; /*gcjdds = gcjdbs = gcjdT = 0.0; v4.2 */ gcjdbs = gcjdT = 0.0; gcjsbs = gcjsT = 0.0; here->B4SOIcjsb = here->B4SOIcjdb = 0.0 /*v4.0*/; } else /* soiMod = 0 or 1 */ { PhiBSWG = model->B4SOIGatesidewallJctSPotential; dPhiBSWG_dT = -model->B4SOItpbswg; PhiBSWG += dPhiBSWG_dT * (Temp - model->B4SOItnom); MJSWG = model->B4SOIbodyJctGateSideSGradingCoeff; cjsbs = model->B4SOIunitLengthGateSidewallJctCapS * pParam->B4SOIwdiosCV * model->B4SOItsi * here->B4SOInf / 1e-7; /* bugfix_snps nf*/ dcjsbs_dT = cjsbs * model->B4SOItcjswg; cjsbs += dcjsbs_dT * (Temp - model->B4SOItnom); cjdbs = model->B4SOIunitLengthGateSidewallJctCapD * pParam->B4SOIwdiodCV * model->B4SOItsi * here->B4SOInf / 1e-7; /* bugfix_snps nf*/ dcjdbs_dT = cjdbs * model->B4SOItcjswgd; cjdbs += dcjdbs_dT * (Temp - model->B4SOItnom); DioMax = 0.9 * (PhiBSWG); /* arg = 1.0 - (Vbs > DioMax ? DioMax : Vbs) / PhiBSWG; */ /* Bug fix #6 Vbs evaluated taking consideration of Rbody Mode*/ if (here->B4SOIrbodyMod) arg = 1.0 - (vsbs > DioMax ? DioMax : vsbs) / PhiBSWG; /* Bug fix #6 */ else arg = 1.0 - (vbs > DioMax ? 
DioMax : vbs) / PhiBSWG; /* Bug fix #6 */ if (selfheat) darg_dT = (1 - arg) / PhiBSWG * dPhiBSWG_dT; else darg_dT = 1.0; /* flexilint */ if (MJSWG == 0.5) { dT3_dVb = 1.0 / sqrt(arg); if (selfheat) ddT3_dVb_dT = -0.5 * dT3_dVb / arg * darg_dT; else ddT3_dVb_dT = 1.0; /* flexilint */ } else { dT3_dVb = exp(-MJSWG * log(arg)); if (selfheat) ddT3_dVb_dT = -MJSWG * dT3_dVb / arg * darg_dT; else ddT3_dVb_dT = 1.0; /* flexilint */ } T3 = (1.0 - arg * dT3_dVb) * PhiBSWG / (1.0 - MJSWG); if (selfheat) dT3_dT = (1.0 - arg * dT3_dVb) * dPhiBSWG_dT / (1.0 - MJSWG) - (arg * ddT3_dVb_dT + darg_dT * dT3_dVb) * PhiBSWG / (1.0 - MJSWG); else dT3_dT = 1.0; /* flexilint */ /* if (vbs > DioMax) T3 += dT3_dVb * (vbs - DioMax); */ /* Bug fix #6 Vbs evaluated taking consideration of Rbody Mode*/ if (here->B4SOIrbodyMod) { if (vsbs > DioMax) /* Bug fix #6 */ T3 += dT3_dVb * (vsbs - DioMax); } else { if (vbs > DioMax) /* Bug fix #6 */ T3 += dT3_dVb * (vbs - DioMax); } if (here->B4SOImode > 0) { qjs = cjsbs * T3 + model->B4SOItt * Ibsdif * here->B4SOInf; gcjsbs = cjsbs * dT3_dVb + model->B4SOItt * dIbsdif_dVb * here->B4SOInf; /* 3 new lines */ if (selfheat) gcjsT = model->B4SOItt * dIbsdif_dT * here->B4SOInf + dcjsbs_dT * T3 + dT3_dT * cjsbs; else gcjsT = 0.0; } else { qjs = cjsbs * T3 + model->B4SOItt * Ibddif * here->B4SOInf; gcjsbs = cjsbs * dT3_dVb + model->B4SOItt * dIbddif_dVb * here->B4SOInf; /* 3 new lines */ if (selfheat) gcjsT = model->B4SOItt * dIbddif_dT * here->B4SOInf + dcjsbs_dT * T3 + dT3_dT * cjsbs; else gcjsT = 0.0; } /* comment out next 3 lines Wagner */ /*if (selfheat) gcjsT = model->B4SOItt * dIbsdif_dT * here->B4SOInf + dcjsbs_dT * T3 + dT3_dT * cjsbs; else gcjsT = 0.0; */ PhiBSWG = model->B4SOIGatesidewallJctDPotential; dPhiBSWG_dT = -model->B4SOItpbswgd; PhiBSWG += dPhiBSWG_dT * (Temp - model->B4SOItnom); MJSWG = model->B4SOIbodyJctGateSideDGradingCoeff; DioMax = 0.9 * (PhiBSWG); /* arg = 1.0 - (vbd > DioMax ? 
DioMax : vbd) / PhiBSWG; */ /* Bug fix #6 Vbd evaluated taking consideration of Rbody Mode*/ if (here->B4SOIrbodyMod) arg = 1.0 - (vdbd > DioMax ? DioMax : vdbd) / PhiBSWG; /* Bug Fix #6 */ else arg = 1.0 - (vbd > DioMax ? DioMax : vbd) / PhiBSWG; /* Bug Fix #6 */ if (selfheat) darg_dT = (1 - arg) / PhiBSWG * dPhiBSWG_dT; else darg_dT = 1.0; /* flexilint */ if (MJSWG == 0.5) { dT3_dVb = 1.0 / sqrt(arg); if (selfheat) ddT3_dVb_dT = -0.5 * dT3_dVb / arg * darg_dT; else ddT3_dVb_dT = 1.0; /* flexilint */ } else { dT3_dVb = exp(-MJSWG * log(arg)); if (selfheat) ddT3_dVb_dT = -MJSWG * dT3_dVb / arg * darg_dT; else ddT3_dVb_dT = 1.0; /* flexilint */ } T3 = (1.0 - arg * dT3_dVb) * PhiBSWG / (1.0 - MJSWG); if (selfheat) dT3_dT = (1.0 - arg * dT3_dVb) * dPhiBSWG_dT / (1.0 - MJSWG) - (arg * ddT3_dVb_dT + darg_dT * dT3_dVb) * PhiBSWG / (1.0 - MJSWG); else dT3_dT = 1.0; /* flexilint */ /* if (vbd > DioMax) T3 += dT3_dVb * (vbd - DioMax); */ /* Bug fix #6 Vbd evaluated taking consideration of Rbody Mode*/ if (here->B4SOIrbodyMod) { if (vdbd > DioMax) /* Bug fix #6 */ T3 += dT3_dVb * (vdbd - DioMax); } else { if (vbd > DioMax) /* Bug fix #6 */ T3 += dT3_dVb * (vbd - DioMax); } dT3_dVd = -dT3_dVb; if (here->B4SOImode > 0) { qjd = cjdbs * T3 + model->B4SOItt * Ibddif * here->B4SOInf; gcjdbs = cjdbs * dT3_dVb + model->B4SOItt * dIbddif_dVb * here->B4SOInf; /* 3 new lines Wagner */ if (selfheat) gcjdT = model->B4SOItt * dIbddif_dT * here->B4SOInf + dcjdbs_dT * T3 + dT3_dT * cjdbs; else gcjdT = 0.0; } else { qjd = cjdbs * T3 + model->B4SOItt * Ibsdif * here->B4SOInf; gcjdbs = cjdbs * dT3_dVb + model->B4SOItt * dIbsdif_dVb * here->B4SOInf; /* 3 new lines Wagner */ if (selfheat) gcjdT = model->B4SOItt * dIbsdif_dT * here->B4SOInf + dcjdbs_dT * T3 + dT3_dT * cjdbs; else gcjdT = 0.0; } /*gcjdds = cjdbs * dT3_dVd + model->B4SOItt * dIbddif_dVd; v4.2 */ /* comment out next 3 lines Wagner */ /*if (selfheat) gcjdT = model->B4SOItt * dIbddif_dT * here->B4SOInf + dcjdbs_dT * T3 + dT3_dT * 
cjdbs; else gcjdT = 0.0;*/ } /* v3.1 */ /* v4.0 */ /* qdrn -= qjd; qbody += (qjs + qjd); qsrc = -(qgate + qbody + qdrn + qsub); */ /* Update the conductance */ /* v4.2 bugfix: qjs/qjd computed using unswapped voltages; however, total capacitances are swapped below note that gcjdds = -gcjdbs always, so (gcjdds + gcjdbs) == 0 here->B4SOIcddb -= gcjdds; here->B4SOIcdT -= gcjdT; here->B4SOIcdsb += gcjdds + gcjdbs; here->B4SOIcbdb += (gcjdds); here->B4SOIcbT += (gcjdT + gcjsT); here->B4SOIcbsb -= (gcjdds + gcjdbs + gcjsbs); here->B4SOIcjsb = (gcjdds + gcjdbs + gcjsbs); here->B4SOIcjdb = -gcjdds; */ here->B4SOIcbT += (gcjdT + gcjsT); if (here->B4SOImode > 0) { here->B4SOIcddb += gcjdbs; here->B4SOIcdT -= gcjdT; here->B4SOIcbdb -= (gcjdbs); here->B4SOIcbsb -= (gcjsbs); here->B4SOIcjsb = gcjsbs; here->B4SOIcjdb = gcjdbs; } else { here->B4SOIcddb += gcjsbs; here->B4SOIcdT -= gcjsT; here->B4SOIcbdb -= (gcjsbs); here->B4SOIcbsb -= (gcjdbs); here->B4SOIcjsb = gcjdbs; here->B4SOIcjdb = gcjsbs; } /* Extrinsic Bottom S/D to substrate charge */ T10 = -model->B4SOItype * ves; /* T10 is vse without type conversion */ T11 = model->B4SOItype * (vds - ves); /* T11 is vde without type conversion */ if (model->B4SOIcsdmin != 0.0) { if ( ((pParam->B4SOInsub > 0) && (model->B4SOItype > 0)) || ((pParam->B4SOInsub < 0) && (model->B4SOItype < 0)) ) { if (T10 < pParam->B4SOIvsdfb) { here->B4SOIqse = here->B4SOIcsbox * (T10 - pParam->B4SOIvsdfb); here->B4SOIgcse = here->B4SOIcsbox; } else if (T10 < pParam->B4SOIsdt1) { T0 = T10 - pParam->B4SOIvsdfb; T1 = T0 * T0; here->B4SOIqse = T0 * (here->B4SOIcsbox - pParam->B4SOIst2 / 3 * T1) ; here->B4SOIgcse = here->B4SOIcsbox - pParam->B4SOIst2 * T1; } else if (T10 < pParam->B4SOIvsdth) { T0 = T10 - pParam->B4SOIvsdth; T1 = T0 * T0; here->B4SOIqse = here->B4SOIcsmin * T10 + here->B4SOIst4 + pParam->B4SOIst3 / 3 * T0 * T1; here->B4SOIgcse = here->B4SOIcsmin + pParam->B4SOIst3 * T1; } else { here->B4SOIqse = here->B4SOIcsmin * T10 + here->B4SOIst4; 
here->B4SOIgcse = here->B4SOIcsmin; } } else { if (T10 < pParam->B4SOIvsdth) { here->B4SOIqse = here->B4SOIcsmin * (T10 - pParam->B4SOIvsdth); here->B4SOIgcse = here->B4SOIcsmin; } else if (T10 < pParam->B4SOIsdt1) { T0 = T10 - pParam->B4SOIvsdth; T1 = T0 * T0; here->B4SOIqse = T0 * (here->B4SOIcsmin - pParam->B4SOIst2 / 3 * T1) ; here->B4SOIgcse = here->B4SOIcsmin - pParam->B4SOIst2 * T1; } else if (T10 < pParam->B4SOIvsdfb) { T0 = T10 - pParam->B4SOIvsdfb; T1 = T0 * T0; here->B4SOIqse = here->B4SOIcsbox * T10 + here->B4SOIst4 + pParam->B4SOIst3 / 3 * T0 * T1; here->B4SOIgcse = here->B4SOIcsbox + pParam->B4SOIst3 * T1; } else { here->B4SOIqse = here->B4SOIcsbox * T10 + here->B4SOIst4; here->B4SOIgcse = here->B4SOIcsbox; } } if ( ((pParam->B4SOInsub > 0) && (model->B4SOItype > 0)) || ((pParam->B4SOInsub < 0) && (model->B4SOItype < 0)) ) { if (T11 < pParam->B4SOIvsdfb) { here->B4SOIqde = here->B4SOIcdbox * (T11 - pParam->B4SOIvsdfb); here->B4SOIgcde = here->B4SOIcdbox; } else if (T11 < pParam->B4SOIsdt1) { T0 = T11 - pParam->B4SOIvsdfb; T1 = T0 * T0; here->B4SOIqde = T0 * (here->B4SOIcdbox - pParam->B4SOIdt2 / 3 * T1) ; here->B4SOIgcde = here->B4SOIcdbox - pParam->B4SOIdt2 * T1; } else if (T11 < pParam->B4SOIvsdth) { T0 = T11 - pParam->B4SOIvsdth; T1 = T0 * T0; here->B4SOIqde = here->B4SOIcdmin * T11 + here->B4SOIdt4 + pParam->B4SOIdt3 / 3 * T0 * T1; here->B4SOIgcde = here->B4SOIcdmin + pParam->B4SOIdt3 * T1; } else { here->B4SOIqde = here->B4SOIcdmin * T11 + here->B4SOIdt4; here->B4SOIgcde = here->B4SOIcdmin; } } else { if (T11 < pParam->B4SOIvsdth) { here->B4SOIqde = here->B4SOIcdmin * (T11 - pParam->B4SOIvsdth); here->B4SOIgcde = here->B4SOIcdmin; } else if (T11 < pParam->B4SOIsdt1) { T0 = T11 - pParam->B4SOIvsdth; T1 = T0 * T0; here->B4SOIqde = T0 * (here->B4SOIcdmin - pParam->B4SOIdt2 / 3 * T1) ; here->B4SOIgcde = here->B4SOIcdmin - pParam->B4SOIdt2 * T1; } else if (T11 < pParam->B4SOIvsdfb) { T0 = T11 - pParam->B4SOIvsdfb; T1 = T0 * T0; here->B4SOIqde = 
here->B4SOIcdbox * T11 + here->B4SOIdt4 + pParam->B4SOIdt3 / 3 * T0 * T1; here->B4SOIgcde = here->B4SOIcdbox + pParam->B4SOIdt3 * T1; } else { here->B4SOIqde = here->B4SOIcdbox * T11 + here->B4SOIdt4; here->B4SOIgcde = here->B4SOIcdbox; } } } else { here->B4SOIqse = here->B4SOIcsbox * T10; here->B4SOIgcse = here->B4SOIcsbox; here->B4SOIqde = here->B4SOIcdbox * T11; here->B4SOIgcde = here->B4SOIcdbox; } /* Extrinsic : Sidewall fringing S/D charge */ here->B4SOIqse += here->B4SOIcsesw * T10; here->B4SOIgcse += here->B4SOIcsesw; here->B4SOIqde += here->B4SOIcdesw * T11; here->B4SOIgcde += here->B4SOIcdesw; /* All charge are multiplied with type at the end, but qse and qde have true polarity => so pre-multiplied with type */ here->B4SOIqse *= model->B4SOItype; here->B4SOIqde *= model->B4SOItype; } else { /* v4.0 */ qjs = qjd = 0.0; here->B4SOIqse = here->B4SOIqde = 0.0; here->B4SOIgcse = here->B4SOIgcde = 0.0; } here->B4SOIcbb = Cbb; here->B4SOIcbd = Cbd; here->B4SOIcbg = Cbg; here->B4SOIqbf = -Qsub0 - Qac0; here->B4SOIqjs = qjs; here->B4SOIqjd = qjd; *(ckt->CKTstate0 + here->B4SOIqbs) = qjs; /* v4.0 */ *(ckt->CKTstate0 + here->B4SOIqbd) = qjd; /* v4.0 */ /* * check convergence */ if ((here->B4SOIoff == 0) || (!(ckt->CKTmode & MODEINITFIX))) { if (Check == 1) { ckt->CKTnoncon++; #ifndef NEWCONV } else { tol = ckt->CKTreltol * MAX(fabs(cdhat), fabs(here->B4SOIcd)) + ckt->CKTabstol; if (fabs(cdhat - here->B4SOIcd) >= tol) { ckt->CKTnoncon++; } else { tol = ckt->CKTreltol * MAX(fabs(cbhat), fabs(here->B4SOIcbs + here->B4SOIcbd)) + ckt->CKTabstol; if (fabs(cbhat - (here->B4SOIcbs + here->B4SOIcbd)) > tol) { ckt->CKTnoncon++; } } #endif /* NEWCONV */ } } *(ckt->CKTstate0 + here->B4SOIvg) = vg; *(ckt->CKTstate0 + here->B4SOIvd) = vd; *(ckt->CKTstate0 + here->B4SOIvs) = vs; *(ckt->CKTstate0 + here->B4SOIvp) = vp; *(ckt->CKTstate0 + here->B4SOIve) = ve; *(ckt->CKTstate0 + here->B4SOIvbs) = vbs; *(ckt->CKTstate0 + here->B4SOIvbd) = vbd; *(ckt->CKTstate0 + here->B4SOIvgs) = vgs; 
*(ckt->CKTstate0 + here->B4SOIvds) = vds; *(ckt->CKTstate0 + here->B4SOIves) = ves; *(ckt->CKTstate0 + here->B4SOIvps) = vps; *(ckt->CKTstate0 + here->B4SOIdeltemp) = delTemp; /* v3.1 added for RF */ *(ckt->CKTstate0 + here->B4SOIvgge) = vgge; *(ckt->CKTstate0 + here->B4SOIvggm) = vggm; *(ckt->CKTstate0 + here->B4SOIvges) = vges; *(ckt->CKTstate0 + here->B4SOIvgms) = vgms; /* v3.1 added for RF end*/ *(ckt->CKTstate0 + here->B4SOIvdbs) = vdbs; /* v4.0 */ *(ckt->CKTstate0 + here->B4SOIvdbd) = vdbd; /* v4.0 */ *(ckt->CKTstate0 + here->B4SOIvsbs) = vsbs; /* v4.0 */ *(ckt->CKTstate0 + here->B4SOIvses) = vses; *(ckt->CKTstate0 + here->B4SOIvdes) = vdes; /* bulk and channel charge plus overlaps */ if (!ChargeComputationNeeded) goto line850; line755: ag0 = ckt->CKTag[0]; T0 = vgd + DELTA_1; if (here->B4SOIrgateMod == 3) T0 = vgmd + DELTA_1; /* v3.2 bug fix */ T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); /* v2.2.3 bug fix */ T3 = pParam->B4SOIwdiodCV * pParam->B4SOIcgdl; /* v3.1 bug fix */ T4 = sqrt(1.0 - 4.0 * T2 / pParam->B4SOIckappa); cgdo = pParam->B4SOIcgdo + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgdo = (pParam->B4SOIcgdo + T3) * vgd - T3 * (T2 + 0.5 * pParam->B4SOIckappa * (T4 - 1.0)); if (here->B4SOIrgateMod == 3) { qgdo = (pParam->B4SOIcgdo + T3) * vgmd - T3 * (T2 + 0.5 * pParam->B4SOIckappa * (T4 - 1.0)); } /* v3.2 bug fix */ T0 = vgs + DELTA_1; if (here->B4SOIrgateMod == 3) T0 = vgms + DELTA_1; /* v3.2 bug fix */ T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); /* v2.2.3 bug fix */ T3 = pParam->B4SOIwdiosCV * pParam->B4SOIcgsl; /* v3.1 bug fix */ T4 = sqrt(1.0 - 4.0 * T2 / pParam->B4SOIckappa); cgso = pParam->B4SOIcgso + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgso = (pParam->B4SOIcgso + T3) * vgs - T3 * (T2 + 0.5 * pParam->B4SOIckappa * (T4 - 1.0)); if (here->B4SOIrgateMod == 3) { qgso = (pParam->B4SOIcgso + T3) * vgms - T3 * (T2 + 0.5 * pParam->B4SOIckappa * (T4 - 1.0)); } /* v3.2 bug fix */ if (here->B4SOInf != 
1.0) { cgdo *= here->B4SOInf; cgso *= here->B4SOInf; qgdo *= here->B4SOInf; qgso *= here->B4SOInf; } /* here->B4SOIcgdo = cgdo; here->B4SOIcgso = cgso; */ if (here->B4SOIdebugMod < 0) goto line850; if (here->B4SOImode > 0) { /* v3.1 added for RF */ if (here->B4SOIrgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->B4SOIcgeo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmeb = -pParam->B4SOIcgeo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcegmb = gcgmeb; gcggb = here->B4SOIcggb * ag0; gcgdb = here->B4SOIcgdb * ag0; gcgsb = here->B4SOIcgsb * ag0; gcgeb = here->B4SOIcgeb * ag0; /* fix line */ gcgbb = -(gcggb + gcgdb + gcgsb + gcgeb); gcdgb = here->B4SOIcdgb * ag0; gcegb = gcgeb; /*v3.1 added*/ gcsgb = -(here->B4SOIcggb + here->B4SOIcbgb + here->B4SOIcdgb) * ag0 - gcegb; gcbgb = here->B4SOIcbgb * ag0; qgd = qgdo; qgs = qgso; qge = 0; /* v3.1 change */ qgme = pParam->B4SOIcgeo * vgme; qgmid = qgdo + qgso + qgme; qdrn += here->B4SOIqde - qgd; qsub -= qgme + here->B4SOIqse + here->B4SOIqde; qsrc = -(qgate + qgmid + qbody + qdrn + qsub) - qjs; qdrn -= qjd; if (!here->B4SOIrbodyMod) qbody += qjd + qjs; } else { gcggb = (here->B4SOIcggb + cgdo + cgso + pParam->B4SOIcgeo) * ag0; gcgdb = (here->B4SOIcgdb - cgdo) * ag0; gcgsb = (here->B4SOIcgsb - cgso) * ag0; gcgeb = (here->B4SOIcgeb - pParam->B4SOIcgeo) *ag0; /* LFW_FD fix line */ gcgbb = -(gcggb + gcgdb + gcgsb + gcgeb); gcegb = (- pParam->B4SOIcgeo) * ag0; gcdgb = (here->B4SOIcdgb - cgdo) * ag0; gcsgb = -(here->B4SOIcggb + here->B4SOIcbgb + here->B4SOIcdgb + cgso) * ag0; gcbgb = here->B4SOIcbgb * ag0; gcdgmb = gcsgmb = gcegmb = 0.0; gcgmdb = gcgmsb = gcgmeb = 0.0; /* Lump the overlap capacitance and S/D parasitics */ qgd = qgdo; qgs = qgso; qge = pParam->B4SOIcgeo * vge; qgate += qgd + qgs + qge; qdrn += here->B4SOIqde - qgd; qsub -= qge + here->B4SOIqse + here->B4SOIqde; qsrc = -(qgate + qbody + qdrn + qsub) - qjs; qdrn -= qjd; if (!here->B4SOIrbodyMod) qbody += qjd + qjs; } gcddb = (here->B4SOIcddb + cgdo + 
here->B4SOIgcde) * ag0; gcdsb = here->B4SOIcdsb * ag0; gcdeb = (here->B4SOIcdeb - here->B4SOIgcde) * ag0; /*fix below expression Wagner */ /*gcdT = model->B4SOItype * here->B4SOIcdT * ag0;*/ gcdT = here->B4SOIcdT * ag0; gcsdb = -(here->B4SOIcgdb + here->B4SOIcbdb + here->B4SOIcddb) * ag0; gcssb = (cgso + here->B4SOIgcse - (here->B4SOIcgsb + here->B4SOIcbsb + here->B4SOIcdsb)) * ag0; gcseb = -(here->B4SOIgcse + here->B4SOIcbeb + here->B4SOIcdeb + here->B4SOIcgeb + here->B4SOIceeb) * ag0; /* LFW_FD fix line */ /*fix below expression Wagner */ /*gcsT = - model->B4SOItype * (here->B4SOIcgT */ gcsT = - (here->B4SOIcgT + here->B4SOIcbT + here->B4SOIcdT + here->B4SOIceT) * ag0; /*fix below expression Wagner */ /*gcgT = model->B4SOItype * here->B4SOIcgT * ag0;*/ gcgT = here->B4SOIcgT * ag0; /* gcbdb = here->B4SOIcbdb * ag0; gcbsb = here->B4SOIcbsb * ag0; */ gcbeb = here->B4SOIcbeb * ag0; gcbT = model->B4SOItype * here->B4SOIcbT * ag0; /* v4.0 */ if (!here->B4SOIrbodyMod) { gcjdbdp = gcjsbsp = 0.0; gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb + gcdeb); gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb + gcseb); gcdbdb = gcsbsb = 0.0; gcbdb = here->B4SOIcbdb * ag0; gcbsb = here->B4SOIcbsb * ag0; here->B4SOIGGjdb = GGjdb = 0.0; here->B4SOIGGjsb = GGjsb = 0.0; } else { gcjdbdp = gcjdbs * ag0; gcjsbsp = gcjsbs * ag0; gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb + gcdeb) + gcjdbdp; gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb + gcseb) + gcjsbsp; /* v4.2 optimization: gcjdds + gcjdbs = 0 gcdbdb = gcjdds * ag0; gcsbsb = -(gcjdds + gcjdbs + gcjsbs) * ag0; */ gcdbdb = -gcjdbs * ag0; gcsbsb = -gcjsbs * ag0; gcbdb = here->B4SOIcbdb * ag0 - gcdbdb; gcbsb = here->B4SOIcbsb * ag0 - gcsbsb; here->B4SOIGGjdb = GGjdb = Gjdb; here->B4SOIGGjsb = GGjsb = Gjsb; } /* v4.0 end */ gcedb = (- here->B4SOIgcde) * ag0; gcesb = (- here->B4SOIgcse) * ag0; gceeb = (here->B4SOIgcse + here->B4SOIgcde + here->B4SOIceeb + pParam->B4SOIcgeo) * ag0; gceT = model->B4SOItype * here->B4SOIceT * ag0; gcTt = pParam->B4SOIcth * ag0; 
sxpart = 0.6; dxpart = 0.4; /* v3.1 moved the following original code ahead */ /* Lump the overlap capacitance and S/D parasitics */ /* qgd = qgdo; qgs = qgso; qge = pParam->B4SOIcgeo * vge; qgate += qgd + qgs + qge; qdrn += here->B4SOIqde - qgd; qsub -= qge + here->B4SOIqse + here->B4SOIqde; qsrc = -(qgate + qbody + qdrn + qsub); */ /* v3.1 end */ } else { if (here->B4SOIrgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->B4SOIcgeo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmeb = -pParam->B4SOIcgeo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcegmb = gcgmeb; gcggb = here->B4SOIcggb * ag0; gcgsb = here->B4SOIcgdb * ag0; gcgdb = here->B4SOIcgsb * ag0; gcgeb = here->B4SOIcgeb * ag0; /* LFW_FD fix line */ gcgbb = -(gcggb + gcgdb + gcgsb + gcgeb); /* v3.1 added gcgeb */ gcsgb = here->B4SOIcdgb * ag0; gcegb = gcgeb; /* v3.1 added */ gcdgb = -(here->B4SOIcggb + here->B4SOIcbgb + here->B4SOIcdgb) * ag0 - gcegb; /*v3.1 added gcegb*/ gcbgb = here->B4SOIcbgb * ag0; qgd = qgdo; qgs = qgso; qge = 0; /* v3.1 */ qgme = pParam->B4SOIcgeo * vgme; qgmid = qgdo + qgso + qgme; qgate += qge; qbody -= 0; qsrc = qdrn - qgs + here->B4SOIqse; qsub -= qgme + here->B4SOIqse + here->B4SOIqde; qdrn = -(qgate + qgmid + qbody + qsrc + qsub) -qjd; qsrc -= qjs; if (!here->B4SOIrbodyMod) qbody += qjs + qjd; } else { gcggb = (here->B4SOIcggb + cgdo + cgso + pParam->B4SOIcgeo) * ag0; gcgdb = (here->B4SOIcgsb - cgdo) * ag0; gcgsb = (here->B4SOIcgdb - cgso) * ag0; gcgeb = (here->B4SOIcgeb - pParam->B4SOIcgeo) * ag0; /* LFW_FD fix line */ gcgbb = -(gcggb + gcgdb + gcgsb + gcgeb); /*added gcgbb*/ gcegb = (- pParam->B4SOIcgeo) * ag0; /* LFW_FD fix line */ gcsgb = (here->B4SOIcdgb - cgso) * ag0; gcdgb = -(here->B4SOIcggb + here->B4SOIcbgb + here->B4SOIcdgb + cgdo) * ag0; gcbgb = here->B4SOIcbgb * ag0; gcdgmb = gcsgmb = gcegmb = 0.0; gcgmdb = gcgmsb = gcgmeb = 0.0; /* Lump the overlap capacitance and S/D parasitics */ qgd = qgdo; qgs = qgso; qge = pParam->B4SOIcgeo * vge; qgate += qgd + qgs + qge; 
qsrc = qdrn - qgs + here->B4SOIqse; qsub -= qge + here->B4SOIqse + here->B4SOIqde; qdrn = -(qgate + qbody + qsrc + qsub) - qjd; qsrc -= qjs; if (!here->B4SOIrbodyMod) qbody += qjs + qjd; } gcssb = (here->B4SOIcddb + cgso + here->B4SOIgcse) * ag0; gcsdb = here->B4SOIcdsb * ag0; gcseb = (here->B4SOIcdeb - here->B4SOIgcse) * ag0; /*fix below expression Wagner */ /*gcsT = model->B4SOItype * here->B4SOIcdT * ag0;*/ gcsT = here->B4SOIcdT * ag0; gcdsb = -(here->B4SOIcgdb + here->B4SOIcbdb + here->B4SOIcddb) * ag0; gcddb = (cgdo + here->B4SOIgcde - (here->B4SOIcgsb + here->B4SOIcbsb + here->B4SOIcdsb)) * ag0; gcdeb = -(here->B4SOIgcde + here->B4SOIcbeb + here->B4SOIcdeb + here->B4SOIcgeb + here->B4SOIceeb) * ag0; /* LFW_FD fix line */ /*fix below expression Wagner */ /*gcdT = - model->B4SOItype * (here->B4SOIcgT */ gcdT = - (here->B4SOIcgT + here->B4SOIcbT + here->B4SOIcdT + here->B4SOIceT) * ag0; /*fix below expression Wagner */ /*gcgT = model->B4SOItype * here->B4SOIcgT * ag0;*/ gcgT = here->B4SOIcgT * ag0; gcbeb = here->B4SOIcbeb * ag0; gcbT = model->B4SOItype * here->B4SOIcbT * ag0; /* v4.0 gcbsb = here->B4SOIcbdb * ag0; gcbdb = here->B4SOIcbsb * ag0; */ /* v4.0 */ if (!here->B4SOIrbodyMod) { gcjdbdp = gcjsbsp = 0.0; gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb + gcdeb); gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb + gcseb); gcdbdb = gcsbsb = 0.0; gcbdb = here->B4SOIcbsb * ag0; gcbsb = here->B4SOIcbdb * ag0; here->B4SOIGGjdb = GGjdb = 0.0; here->B4SOIGGjsb = GGjsb = 0.0; } else { /* v4.2 bugfix; qjd/qjs are not swapped gcjdbdp = gcjsbs * ag0; gcjsbsp = gcjdbs * ag0; */ gcjdbdp = gcjdbs * ag0; gcjsbsp = gcjsbs * ag0; gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb + gcdeb) + gcjdbdp; gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb + gcseb) + gcjsbsp; /* v4.2 bugfix; qjd/qjs are not swapped gcsbsb = gcjdds * ag0; gcdbdb = -(gcjdds + gcjdbs + gcjsbs) * ag0; */ gcsbsb = -gcjdbs * ag0; gcdbdb = -gcjsbs * ag0; gcbdb = here->B4SOIcbsb * ag0 - gcdbdb; gcbsb = here->B4SOIcbdb * ag0 - gcsbsb; 
here->B4SOIGGjdb = GGjdb = Gjsb; here->B4SOIGGjsb = GGjsb = Gjdb; } /* v4.0 end */ /* gcegb = (-pParam->B4SOIcgeo) * ag0; V3.2 bug fix */ gcesb = (- here->B4SOIgcse) * ag0; gcedb = (- here->B4SOIgcde) * ag0; gceeb = (here->B4SOIceeb + pParam->B4SOIcgeo + here->B4SOIgcse + here->B4SOIgcde) * ag0; gceT = model->B4SOItype * here->B4SOIceT * ag0; gcTt = pParam->B4SOIcth * ag0; dxpart = 0.6; sxpart = 0.4; /* v3.1 moved the following code ahead */ /* Lump the overlap capacitance */ /* qgd = qgdo; qgs = qgso; qge = pParam->B4SOIcgeo * vge; qgate += qgd + qgs + qge; qsrc = qdrn - qgs + here->B4SOIqse; qsub -= qge + here->B4SOIqse + here->B4SOIqde; qdrn = -(qgate + qbody + qsrc + qsub); */ /* v3.1 end */ } here->B4SOIcgdo = cgdo; here->B4SOIcgso = cgso; if (ByPass) goto line860; *(ckt->CKTstate0 + here->B4SOIqe) = qsub; *(ckt->CKTstate0 + here->B4SOIqg) = qgate; *(ckt->CKTstate0 + here->B4SOIqd) = qdrn; *(ckt->CKTstate0 + here->B4SOIqb) = qbody; if ((model->B4SOIshMod == 1) && (here->B4SOIrth0!=0.0)) *(ckt->CKTstate0 + here->B4SOIqth) = pParam->B4SOIcth * delTemp; if (here->B4SOIrgateMod == 3) /* 3.1 bug fix */ *(ckt->CKTstate0 + here->B4SOIqgmid) = qgmid; /* store small signal parameters */ if (ckt->CKTmode & MODEINITSMSIG) { goto line1000; } if (!ChargeComputationNeeded) goto line850; if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->B4SOIqb) = *(ckt->CKTstate0 + here->B4SOIqb); *(ckt->CKTstate1 + here->B4SOIqg) = *(ckt->CKTstate0 + here->B4SOIqg); *(ckt->CKTstate1 + here->B4SOIqd) = *(ckt->CKTstate0 + here->B4SOIqd); *(ckt->CKTstate1 + here->B4SOIqe) = *(ckt->CKTstate0 + here->B4SOIqe); *(ckt->CKTstate1 + here->B4SOIqth) = *(ckt->CKTstate0 + here->B4SOIqth); if (here->B4SOIrgateMod == 3) *(ckt->CKTstate1 + here->B4SOIqgmid) = *(ckt->CKTstate0 + here->B4SOIqgmid); if (here->B4SOIrbodyMod) /* v4.0 */ { *(ckt->CKTstate1 + here->B4SOIqbs) = *(ckt->CKTstate0 + here->B4SOIqbs); *(ckt->CKTstate1 + here->B4SOIqbd) = *(ckt->CKTstate0 + here->B4SOIqbd); } } error = 
NIintegrate(ckt, &geq, &ceq,0.0,here->B4SOIqb); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->B4SOIqg); if (error) return(error); error = NIintegrate(ckt,&geq, &ceq, 0.0, here->B4SOIqd); if (error) return(error); error = NIintegrate(ckt,&geq, &ceq, 0.0, here->B4SOIqe); if (error) return(error); if ((model->B4SOIshMod == 1) && (here->B4SOIrth0!=0.0)) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->B4SOIqth); if (error) return (error); } if (here->B4SOIrgateMod == 3) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->B4SOIqgmid); if (error) return(error); } /*3.1 bug fix*/ if (here->B4SOIrbodyMod) /* v4.0 */ { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->B4SOIqbs); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->B4SOIqbd); if (error) return(error); } goto line860; line850: /* initialize to zero charge conductance and current */ ceqqe = ceqqg = ceqqb = ceqqd = ceqqth= 0.0; gcdgb = gcddb = gcdsb = gcdeb = gcdT = 0.0; gcsgb = gcsdb = gcssb = gcseb = gcsT = 0.0; gcggb = gcgdb = gcgsb = gcgeb = gcgT = 0.0; gcbgb = gcbdb = gcbsb = gcbeb = gcbT = 0.0; gcegb = gcedb = gceeb = gcesb = gceT = 0.0; gcTt = 0.0; /* v3.1 added for RF */ gcgmgmb = gcgmdb = gcgmsb = gcgmeb = 0.0; gcdgmb = gcsgmb = gcegmb = ceqqgmid = 0.0; gcgbb = gcsbb = gcdbb = 0.0; /* v3.1 added for RF end */ gcdbdb = gcsbsb = gcjdbdp = gcjsbsp = 0.0; /* v4.0 */ ceqqjd = ceqqjs = 0.0; /* v4.0 */ GGjdb = GGjsb = 0.0; /* v4.0 */ sxpart = (1.0 - (dxpart = (here->B4SOImode > 0) ? 
0.4 : 0.6)); goto line900; line860: /* evaluate equivalent charge current */ cqgate = *(ckt->CKTstate0 + here->B4SOIcqg); cqbody = *(ckt->CKTstate0 + here->B4SOIcqb); cqdrn = *(ckt->CKTstate0 + here->B4SOIcqd); cqsub = *(ckt->CKTstate0 + here->B4SOIcqe); cqtemp = *(ckt->CKTstate0 + here->B4SOIcqth); here->B4SOIcb += cqbody; here->B4SOIcd += cqdrn; ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs - gcgeb * veb - gcgT * delTemp; ceqqb = cqbody - gcbgb * vgb + gcbdb * vbd + gcbsb * vbs - gcbeb * veb - gcbT * delTemp; /* v3.2 bug fix */ ceqqd = cqdrn - gcdgb * vgb + (gcddb + gcdbdb) * vbd + gcdsb * vbs - gcdeb * veb - gcdT * delTemp - gcdbdb * vbd_jct - gcdgmb * vgmb;/* v4.0 */ ceqqe = cqsub - gcegb * vgb + gcedb * vbd + gcesb * vbs - gceeb * veb - gceT * delTemp - gcegmb * vgmb; /* 3.2 bug fix */ ceqqth = cqtemp - gcTt * delTemp; /* v3.1 added for RF */ if (here->B4SOIrgateMod == 3) ceqqgmid = *(ckt->CKTstate0 + here->B4SOIcqgmid) + gcgmdb * vbd + gcgmsb * vbs - gcgmgmb * vgmb;/* 3.2 bug fix */ else ceqqgmid = 0.0; /* v3.1 added for RF end */ if (here->B4SOIrbodyMod) /* v4.0 */ { ceqqjs = *(ckt->CKTstate0 + here->B4SOIcqbs) + gcsbsb * vbs_jct; ceqqjd = *(ckt->CKTstate0 + here->B4SOIcqbd) + gcdbdb * vbd_jct; } if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->B4SOIcqe) = *(ckt->CKTstate0 + here->B4SOIcqe); *(ckt->CKTstate1 + here->B4SOIcqb) = *(ckt->CKTstate0 + here->B4SOIcqb); *(ckt->CKTstate1 + here->B4SOIcqg) = *(ckt->CKTstate0 + here->B4SOIcqg); *(ckt->CKTstate1 + here->B4SOIcqd) = *(ckt->CKTstate0 + here->B4SOIcqd); *(ckt->CKTstate1 + here->B4SOIcqth) = *(ckt->CKTstate0 + here->B4SOIcqth); if (here->B4SOIrgateMod == 3) /* v3.1 */ *(ckt->CKTstate1 + here->B4SOIcqgmid) = *(ckt->CKTstate0 + here->B4SOIcqgmid); if (here->B4SOIrbodyMod) /* v4.0 */ { *(ckt->CKTstate1 + here->B4SOIcqbs) = *(ckt->CKTstate0 + here->B4SOIcqbs); *(ckt->CKTstate1 + here->B4SOIcqbd) = *(ckt->CKTstate0 + here->B4SOIcqbd); } } /* * load current vector */ line900: if 
(here->B4SOImode >= 0) { Gm = here->B4SOIgm; Gmbs = here->B4SOIgmbs; /* v3.0 */ Gme = here->B4SOIgme; GmT = model->B4SOItype * here->B4SOIgmT; FwdSum = Gm + Gmbs + Gme; /* v3.0 */ RevSum = 0.0; /* v2.2.2 bug fix */ cdreq = model->B4SOItype * (here->B4SOIcdrain - here->B4SOIgds * vds - Gm * vgs - Gmbs * vbs - Gme * ves) - GmT * delTemp; /* v3.0 */ /* ceqbs now is compatible with cdreq, ie. going in is +ve */ /* Equivalent current source from the diode */ ceqbs = here->B4SOIcjs; ceqbd = here->B4SOIcjd; cdbdp = Idbdp; csbsp = Isbsp; /* Current going in is +ve */ ceqbody = -here->B4SOIcbody; ceqgate = here->B4SOIcgate; gigg = here->B4SOIgigg; gigb = here->B4SOIgigb; gige = here->B4SOIgige; /* v3.0 */ gigs = here->B4SOIgigs; gigd = here->B4SOIgigd; gigT = model->B4SOItype * here->B4SOIgigT; ceqth = here->B4SOIcth; ceqbodcon = here->B4SOIcbodcon; /* v4.1 */ gigpg = here->B4SOIgigpg; gigpp = here->B4SOIgigpp; ceqgate += (here->B4SOIigp - gigpg * vgp); if(here->B4SOIbodyMod == 1) ceqbodcon += (here->B4SOIigp - gigpg * vgp); else if(here->B4SOIbodyMod == 2) ceqbody -= (here->B4SOIigp - gigpg * vgp); gbbg = -here->B4SOIgbgs; gbbdp = -here->B4SOIgbds; gbbb = -here->B4SOIgbbs; gbbp = -here->B4SOIgbps; gbbT = -model->B4SOItype * here->B4SOIgbT; /* v3.0 */ gbbe = -here->B4SOIgbes; if (here->B4SOIrbodyMod) { /* v4.0 */ gbbdp = -Giid - Ggidld - Ggisls; gbbb = -Giib + Gbpbs; gjsdb = Gjsb + Gjdb; } gbbsp = - ( gbbg + gbbdp + gbbb + gbbp + gbbe); gddpg = -here->B4SOIgjdg; gddpdp = -here->B4SOIgjdd; if (!here->B4SOIrbodyMod) /* v4.0 */ gddpb = -here->B4SOIgjdb; else gddpb = Giib + Ggidlb + Ggislb; gddpT = -model->B4SOItype * here->B4SOIgjdT; /* v3.0 */ gddpe = -here->B4SOIgjde; gddpsp = - ( gddpg + gddpdp + gddpb + gddpe); gsspg = -here->B4SOIgjsg; gsspdp = -here->B4SOIgjsd; if (!here->B4SOIrbodyMod) gsspb = -here->B4SOIgjsb; else gsspb = 0.0; gsspT = -model->B4SOItype * here->B4SOIgjsT; /* v3.0 */ gsspe = 0.0; gsspsp = - (gsspg + gsspdp + gsspb + gsspe); gppb = -here->B4SOIgbpbs; 
gppp = -here->B4SOIgbpps; gTtg = here->B4SOIgtempg; gTtb = here->B4SOIgtempb; gTtdp = here->B4SOIgtempd; gTtt = here->B4SOIgtempT; /* v3.0 */ gTte = here->B4SOIgtempe; gTtsp = - (gTtg + gTtb + gTtdp + gTte); /* v3.0 */ if (model->B4SOIigcMod) { gIstotg = here->B4SOIgIgsg + here->B4SOIgIgcsg; gIstotd = here->B4SOIgIgcsd; gIstots = here->B4SOIgIgss + here->B4SOIgIgcss; gIstotb = here->B4SOIgIgcsb; Istoteq = model->B4SOItype * (here->B4SOIIgs + here->B4SOIIgcs - gIstotg * vgs - here->B4SOIgIgcsd * vds - here->B4SOIgIgcsb * vbs); gIdtotg = here->B4SOIgIgdg + here->B4SOIgIgcdg; gIdtotd = here->B4SOIgIgdd + here->B4SOIgIgcdd; gIdtots = here->B4SOIgIgcds; gIdtotb = here->B4SOIgIgcdb; Idtoteq = model->B4SOItype * (here->B4SOIIgd + here->B4SOIIgcd - here->B4SOIgIgdg * vgd - here->B4SOIgIgcdg * vgs - here->B4SOIgIgcdd * vds - here->B4SOIgIgcdb * vbs); gIgtotg = gIstotg + gIdtotg; gIgtotd = gIstotd + gIdtotd; gIgtots = gIstots + gIdtots; gIgtotb = gIstotb + gIdtotb; Igtoteq = Istoteq + Idtoteq; } else { gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0; gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0; gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0; } /* v3.1 added for RF */ if (here->B4SOIrgateMod == 2) T0 = vges - vgs; else if (here->B4SOIrgateMod == 3) T0 = vgms - vgs; if (here->B4SOIrgateMod > 1) { gcrgd = here->B4SOIgcrgd * T0; gcrgg = here->B4SOIgcrgg * T0; gcrgs = here->B4SOIgcrgs * T0; gcrgb = here->B4SOIgcrgb * T0; ceqgcrg = -(gcrgd * vds + gcrgg * vgs + gcrgb * vbs); gcrgg -= here->B4SOIgcrg; gcrg = here->B4SOIgcrg; } else ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0; /* v3.1 added for RF end */ } /* end of soimode>=0 */ else { Gm = -here->B4SOIgm; Gmbs = -here->B4SOIgmbs; /* v3.0 */ Gme = -here->B4SOIgme; GmT = -model->B4SOItype * here->B4SOIgmT; FwdSum = 0.0; RevSum = -(Gm + Gmbs + Gme); /* v3.0 */ /* v3.1 bug fix */ cdreq = -model->B4SOItype * (here->B4SOIcdrain + here->B4SOIgds*vds + Gm * vgd + Gmbs * vbd + Gme * (ves - vds)) - GmT * 
delTemp; ceqbs = here->B4SOIcjd; ceqbd = here->B4SOIcjs; csbsp = Idbdp; cdbdp = Isbsp; /* Current going in is +ve */ ceqbody = -here->B4SOIcbody; ceqgate = here->B4SOIcgate; gigg = here->B4SOIgigg; gigb = here->B4SOIgigb; gige = here->B4SOIgige; /* v3.0 */ gigs = here->B4SOIgigd; gigd = here->B4SOIgigs; gigT = model->B4SOItype * here->B4SOIgigT; ceqth = here->B4SOIcth; ceqbodcon = here->B4SOIcbodcon; /* v4.1 */ gigpg = here->B4SOIgigpg; gigpp = here->B4SOIgigpp; ceqgate += (here->B4SOIigp - gigpg * vgp); if(here->B4SOIbodyMod == 1) ceqbodcon += (here->B4SOIigp - gigpg * vgp); else if(here->B4SOIbodyMod == 2) ceqbody -= (here->B4SOIigp - gigpg * vgp); gbbg = -here->B4SOIgbgs; gbbb = -here->B4SOIgbbs; gbbp = -here->B4SOIgbps; gbbsp = -here->B4SOIgbds; gbbT = -model->B4SOItype * here->B4SOIgbT; /* v3.0 */ gbbe = -here->B4SOIgbes; if (here->B4SOIrbodyMod) { /* v4.0 */ gbbsp = -Giid - Ggidld - Ggisls; gbbb = -Giib + Gbpbs; gjsdb = Gjsb + Gjdb; } gbbdp = - ( gbbg + gbbsp + gbbb + gbbp + gbbe); gddpg = -here->B4SOIgjsg; gddpsp = -here->B4SOIgjsd; if (!here->B4SOIrbodyMod) gddpb = -here->B4SOIgjsb; else gddpb = 0.0; gddpT = -model->B4SOItype * here->B4SOIgjsT; /* v3.0 */ gddpe = 0.0; gddpdp = - (gddpg + gddpsp + gddpb + gddpe); gsspg = -here->B4SOIgjdg; gsspsp = -here->B4SOIgjdd; if (!here->B4SOIrbodyMod) gsspb = -here->B4SOIgjdb; else gsspb = Giib + Ggidlb + Ggislb; gsspT = -model->B4SOItype * here->B4SOIgjdT; /* v3.0 */ gsspe = -here->B4SOIgjde; gsspdp = - ( gsspg + gsspsp + gsspb + gsspe); gppb = -here->B4SOIgbpbs; gppp = -here->B4SOIgbpps; gTtg = here->B4SOIgtempg; gTtb = here->B4SOIgtempb; gTtsp = here->B4SOIgtempd; gTtt = here->B4SOIgtempT; /* v3.0 */ gTte = here->B4SOIgtempe; gTtdp = - (gTtg + gTtb + gTtsp + gTte); /* v3.0 */ if (model->B4SOIigcMod) { gIstotg = here->B4SOIgIgsg + here->B4SOIgIgcdg; gIstotd = here->B4SOIgIgcds; gIstots = here->B4SOIgIgss + here->B4SOIgIgcdd; gIstotb = here->B4SOIgIgcdb; Istoteq = model->B4SOItype * (here->B4SOIIgs + here->B4SOIIgcd - 
here->B4SOIgIgsg * vgs - here->B4SOIgIgcdg * vgd + here->B4SOIgIgcdd * vds - here->B4SOIgIgcdb * vbd); gIdtotg = here->B4SOIgIgdg + here->B4SOIgIgcsg; gIdtotd = here->B4SOIgIgdd + here->B4SOIgIgcss; gIdtots = here->B4SOIgIgcsd; gIdtotb = here->B4SOIgIgcsb; Idtoteq = model->B4SOItype * (here->B4SOIIgd + here->B4SOIIgcs - (here->B4SOIgIgdg + here->B4SOIgIgcsg) * vgd + here->B4SOIgIgcsd * vds - here->B4SOIgIgcsb * vbd); gIgtotg = gIstotg + gIdtotg; gIgtotd = gIstotd + gIdtotd; gIgtots = gIstots + gIdtots; gIgtotb = gIstotb + gIdtotb; Igtoteq = Istoteq + Idtoteq; } else { gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0; gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0; gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0; } /* v3.1 added for RF */ if (here->B4SOIrgateMod == 2) T0 = vges - vgs; else if (here->B4SOIrgateMod == 3) T0 = vgms - vgs; if (here->B4SOIrgateMod > 1) { gcrgd = here->B4SOIgcrgs * T0; gcrgg = here->B4SOIgcrgg * T0; gcrgs = here->B4SOIgcrgd * T0; gcrgb = here->B4SOIgcrgb * T0; ceqgcrg = -(gcrgg * vgd - gcrgs * vds + gcrgb * vbd); gcrgg -= here->B4SOIgcrg; gcrg = here->B4SOIgcrg; } else ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0; /* v3.1 added for RF end */ } /* end of soimod<0 */ if (model->B4SOIrdsMod == 1) { ceqgstot = model->B4SOItype * (here->B4SOIgstotd * vds + here->B4SOIgstotg * vgs + here->B4SOIgstotb * vbs); /* ceqgstot flowing away from sNodePrime */ gstot = here->B4SOIgstot; gstotd = here->B4SOIgstotd; gstotg = here->B4SOIgstotg; gstots = here->B4SOIgstots - gstot; gstotb = here->B4SOIgstotb; ceqgdtot = -model->B4SOItype * (here->B4SOIgdtotd * vds + here->B4SOIgdtotg * vgs + here->B4SOIgdtotb * vbs); /* ceqgdtot defined as flowing into dNodePrime */ gdtot = here->B4SOIgdtot; gdtotd = here->B4SOIgdtotd - gdtot; gdtotg = here->B4SOIgdtotg; gdtots = here->B4SOIgdtots; gdtotb = here->B4SOIgdtotb; } else { gstot = gstotd = gstotg = gstots = gstotb = ceqgstot = 0.0; gdtot = gdtotd = gdtotg = gdtots = gdtotb = ceqgdtot = 
0.0; } if (model->B4SOItype > 0) { ceqqg = ceqqg; ceqqb = ceqqb; ceqqe = ceqqe; ceqqd = ceqqd; } else { ceqbodcon = -ceqbodcon; ceqbody = -ceqbody; ceqgate = -ceqgate; ceqbs = -ceqbs; ceqbd = -ceqbd; ceqqg = -ceqqg; ceqqb = -ceqqb; ceqqd = -ceqqd; ceqqe = -ceqqe; cdbdp = - cdbdp; /* v4.0 */ csbsp = - csbsp; /* v4.0 */ ceqgcrg = -ceqgcrg; /* v3.1 */ if (here->B4SOIrgateMod == 3) ceqqgmid = -ceqqgmid; if (here->B4SOIrbodyMod) /* v4.0 */ { ceqqjs = -ceqqjs; ceqqjd = -ceqqjd; } } m = here->B4SOIm; /* v3.1 */ #ifndef USE_OMP /* v3.1 added ceqgcrg for RF */ (*(ckt->CKTrhs + here->B4SOIgNode) -= m * ((ceqgate + ceqqg) + Igtoteq - ceqgcrg)); /* v3.1 added ceqgcrg for RF end */ (*(ckt->CKTrhs + here->B4SOIdNodePrime) += m * ((ceqbd - cdreq - ceqqd) + Idtoteq /* v4.0 */ + ceqgdtot)); if (!here->B4SOIrbodyMod) { (*(ckt->CKTrhs + here->B4SOIsNodePrime) += m * ((cdreq + ceqbs + ceqqg + ceqqb + ceqqd + ceqqe) + Istoteq + ceqqgmid - ceqgstot)); /* v4.0 */ } else { /* v4.0 */ (*(ckt->CKTrhs + here->B4SOIsNodePrime) += m * ((cdreq + ceqbs + ceqqg + ceqqb + ceqqd + ceqqe) + Istoteq + ceqqgmid + ceqqjd + ceqqjs - ceqgstot)); } (*(ckt->CKTrhs + here->B4SOIeNode) -= m * ceqqe); if (here->B4SOIrgateMod == 2) (*(ckt->CKTrhs + here->B4SOIgNodeExt) -= m * ceqgcrg); else if (here->B4SOIrgateMod == 3) (*(ckt->CKTrhs + here->B4SOIgNodeMid) -= m * (ceqqgmid + ceqgcrg)); if (here->B4SOIbodyMod == 1) { (*(ckt->CKTrhs + here->B4SOIpNode) += m * ceqbodcon); } if ( here->B4SOIsoiMod != 2 ) {if (!here->B4SOIrbodyMod) (*(ckt->CKTrhs + here->B4SOIbNode) -= m * (ceqbody + ceqqb)); else /* v4.0 */ { (*(ckt->CKTrhs + here->B4SOIdbNode) -= m * (cdbdp + ceqqjd)); (*(ckt->CKTrhs + here->B4SOIbNode) -= m * (ceqbody + ceqqb)); (*(ckt->CKTrhs + here->B4SOIsbNode) -= m * (csbsp + ceqqjs)); } } if (selfheat) { (*(ckt->CKTrhs + here->B4SOItempNode) -= m * (ceqth + ceqqth)); } if (model->B4SOIrdsMod) { (*(ckt->CKTrhs + here->B4SOIdNode) -= m * ceqgdtot); (*(ckt->CKTrhs + here->B4SOIsNode) += m * ceqgstot); } #else 
/* OpenMP parallelization: Temporary storage of right hand side values into instance storage space. Update to matrix will be done by function B4SOILoadRhsMat() only when all instances have their values stored. */ /* v3.1 added ceqgcrg for RF */ here->B4SOINode_1 = m * ((ceqgate + ceqqg) + Igtoteq - ceqgcrg); /* v3.1 added ceqgcrg for RF end */ here->B4SOINode_2 = m * ((ceqbd - cdreq - ceqqd) + Idtoteq /* v4.0 */ + ceqgdtot); if (!here->B4SOIrbodyMod) { here->B4SOINode_3 = m * ((cdreq + ceqbs + ceqqg + ceqqb + ceqqd + ceqqe) + Istoteq + ceqqgmid - ceqgstot); /* v4.0 */ } else { /* v4.0 */ here->B4SOINode_4 = m * ((cdreq + ceqbs + ceqqg + ceqqb + ceqqd + ceqqe) + Istoteq + ceqqgmid + ceqqjd + ceqqjs - ceqgstot); } here->B4SOINode_5 = m * ceqqe; if (here->B4SOIrgateMod == 2) here->B4SOINode_6 = m * ceqgcrg; else if (here->B4SOIrgateMod == 3) here->B4SOINode_7 = m * (ceqqgmid + ceqgcrg); if (here->B4SOIbodyMod == 1) { here->B4SOINode_8 = m * ceqbodcon; } if ( here->B4SOIsoiMod != 2 ) {if (!here->B4SOIrbodyMod) here->B4SOINode_9 = m * (ceqbody + ceqqb); else /* v4.0 */ { here->B4SOINode_10 = m * (cdbdp + ceqqjd); here->B4SOINode_11 = m * (ceqbody + ceqqb); here->B4SOINode_12 = m * (csbsp + ceqqjs); } } here->B4SOINode_sh = selfheat; if (selfheat) { here->B4SOINode_13 = m * (ceqth + ceqqth); } if (model->B4SOIrdsMod) { here->B4SOINode_14 = m * ceqgdtot; here->B4SOINode_15 = m * ceqgstot; } #endif if (here->B4SOIdebugMod != 0) { *(ckt->CKTrhs + here->B4SOIvbsNode) = here->B4SOIvbseff; *(ckt->CKTrhs + here->B4SOIidsNode) = FLOG(here->B4SOIids); *(ckt->CKTrhs + here->B4SOIicNode) = FLOG(here->B4SOIic); *(ckt->CKTrhs + here->B4SOIibsNode) = FLOG(here->B4SOIibs); *(ckt->CKTrhs + here->B4SOIibdNode) = FLOG(here->B4SOIibd); *(ckt->CKTrhs + here->B4SOIiiiNode) = FLOG(here->B4SOIiii); *(ckt->CKTrhs + here->B4SOIigNode) = here->B4SOIig; *(ckt->CKTrhs + here->B4SOIgiggNode) = here->B4SOIgigg; *(ckt->CKTrhs + here->B4SOIgigdNode) = here->B4SOIgigd; *(ckt->CKTrhs + 
here->B4SOIgigbNode) = here->B4SOIgigb; *(ckt->CKTrhs + here->B4SOIigidlNode) = here->B4SOIigidl; *(ckt->CKTrhs + here->B4SOIitunNode) = here->B4SOIitun; *(ckt->CKTrhs + here->B4SOIibpNode) = here->B4SOIibp; *(ckt->CKTrhs + here->B4SOIcbbNode) = here->B4SOIcbb; *(ckt->CKTrhs + here->B4SOIcbdNode) = here->B4SOIcbd; *(ckt->CKTrhs + here->B4SOIcbgNode) = here->B4SOIcbg; *(ckt->CKTrhs + here->B4SOIqbfNode) = here->B4SOIqbf; *(ckt->CKTrhs + here->B4SOIqjsNode) = here->B4SOIqjs; *(ckt->CKTrhs + here->B4SOIqjdNode) = here->B4SOIqjd; } if (!model->B4SOIrdsMod) { gdpr = here->B4SOIdrainConductance; gspr = here->B4SOIsourceConductance; } else gdpr = gspr = 0.0; /* v4.0 */ /* * load y matrix */ Gmin = ckt->CKTgmin * 1e-6; /* v3.1 added for RF */ geltd = here->B4SOIgrgeltd; #ifndef USE_OMP if (here->B4SOIrgateMod == 1) { *(here->B4SOIGEgePtr) += m * geltd; *(here->B4SOIGgePtr) -= m * geltd; *(here->B4SOIGEgPtr) -= m * geltd; } else if (here->B4SOIrgateMod == 2) { *(here->B4SOIGEgePtr) += m * gcrg; *(here->B4SOIGEgPtr) += m * gcrgg; *(here->B4SOIGEdpPtr) += m * gcrgd; *(here->B4SOIGEspPtr) += m * gcrgs; *(here->B4SOIGgePtr) -= m * gcrg; if (here->B4SOIsoiMod !=2) /* v3.2 */ *(here->B4SOIGEbPtr) += m * gcrgb; } else if (here->B4SOIrgateMod == 3) { *(here->B4SOIGEgePtr) += m * geltd; *(here->B4SOIGEgmPtr) -= m * geltd; *(here->B4SOIGMgePtr) -= m * geltd; *(here->B4SOIGMgmPtr) += m * (geltd + gcrg + gcgmgmb); *(here->B4SOIGMdpPtr) += m * (gcrgd + gcgmdb); *(here->B4SOIGMgPtr) += m * gcrgg; *(here->B4SOIGMspPtr) += m * (gcrgs + gcgmsb); *(here->B4SOIGMePtr) += m * gcgmeb; if (here->B4SOIsoiMod !=2) /* v3.2 */ *(here->B4SOIGMbPtr) += m * gcrgb; *(here->B4SOIDPgmPtr) += m * gcdgmb; *(here->B4SOIGgmPtr) -= m * gcrg; *(here->B4SOISPgmPtr) += m * gcsgmb; *(here->B4SOIEgmPtr) += m * gcegmb; } /* v3.1 added for RF end*/ /* v3.0 */ if (here->B4SOIsoiMod != 0) /* v3.2 */ { (*(here->B4SOIDPePtr) += m * (Gme + gddpe)); (*(here->B4SOISPePtr) += m * (gsspe - Gme)); if (here->B4SOIsoiMod != 2) 
/* v3.2 */ { *(here->B4SOIGePtr) += m * gige; *(here->B4SOIBePtr) -= m * gige; } } *(here->B4SOIEdpPtr) += m * gcedb; *(here->B4SOIEspPtr) += m * gcesb; *(here->B4SOIDPePtr) += m * gcdeb; *(here->B4SOISPePtr) += m * gcseb; *(here->B4SOIEgPtr) += m * gcegb; *(here->B4SOIGePtr) += m * gcgeb; /* v3.1 */ if (here->B4SOIsoiMod != 2) /* v3.2 */ { (*(here->B4SOIEbPtr) -= m * (gcegb + gcedb + gcesb + gceeb + gcegmb)); /* 3.2 bug fix */ /* v3.1 changed GbPtr for RF */ if ((here->B4SOIrgateMod == 0) || (here->B4SOIrgateMod == 1)) (*(here->B4SOIGbPtr) -= m * (-gigb + gcggb + gcgdb + gcgsb + gcgeb - gIgtotb)); else /* v3.1 for rgateMod = 2 or 3 */ *(here->B4SOIGbPtr) += m * (gigb + gcgbb +gIgtotb - gcrgb); (*(here->B4SOIDPbPtr) -= m * (-gddpb - Gmbs - gcdbb + gdtotb + gIdtotb )); /* v4.0 */ /* (*(here->B4SOIDPbPtr) -= (-gddpb - Gmbs + gcdgb + gcddb + gcdeb + gcdsb) + gcdgmb + gIdtotb ); */ (*(here->B4SOISPbPtr) -= m * (-gsspb + Gmbs - gcsbb + gstotb + Gmin + gIstotb)); /* v4.0 */ /* (*(here->B4SOISPbPtr) -= (-gsspb + Gmbs + gcsgb + gcsdb + gcseb + gcssb) + gcsgmb + Gmin + gIstotb); */ (*(here->B4SOIBePtr) += m * (gbbe + gcbeb)); /* v3.0 */ (*(here->B4SOIBgPtr) += m * (-gigg + gcbgb + gbbg)); (*(here->B4SOIBdpPtr) += m * (-gigd + gcbdb + gbbdp )); (*(here->B4SOIBspPtr) += m * (gcbsb + gbbsp - Gmin - gigs)); /* if (!here->B4SOIrbodyMod) */ (*(here->B4SOIBbPtr) += m * (-gigb + gbbb - gcbgb - gcbdb - gcbsb - gcbeb + Gmin)) ; /* else (*(here->B4SOIBbPtr) += -gigb - (Giib - Gbpbs) - gcbgb - gcbdb - gcbsb - gcbeb + Gmin) ; */ /* v4.0 */ if (here->B4SOIrbodyMod) { (*(here->B4SOIDPdbPtr) += m * (-gcjdbdp - GGjdb)); (*(here->B4SOISPsbPtr) += m * (-gcjsbsp - GGjsb)); (*(here->B4SOIDBdpPtr) += m * (-gcjdbdp - GGjdb)); (*(here->B4SOIDBdbPtr) += m * (gcjdbdp + GGjdb + here->B4SOIgrbdb)); (*(here->B4SOIDBbPtr) -= m * here->B4SOIgrbdb); (*(here->B4SOISBspPtr) += m * (-gcjsbsp - GGjsb)); (*(here->B4SOISBbPtr) -= m * here->B4SOIgrbsb); (*(here->B4SOISBsbPtr) += m * (gcjsbsp + GGjsb + 
here->B4SOIgrbsb)); (*(here->B4SOIBdbPtr) -= m * here->B4SOIgrbdb); (*(here->B4SOIBsbPtr) -= m * here->B4SOIgrbsb); (*(here->B4SOIBbPtr) += m * (here->B4SOIgrbsb + here->B4SOIgrbdb)); } if (model->B4SOIrdsMod) { (*(here->B4SOIDbPtr) += m * gdtotb); (*(here->B4SOISbPtr) += m * gstotb); } } /* v3.1 */ if (model->B4SOIrdsMod) { (*(here->B4SOIDgPtr) += m * gdtotg); (*(here->B4SOIDspPtr) += m * gdtots); (*(here->B4SOISdpPtr) += m * gstotd); (*(here->B4SOISgPtr) += m * gstotg); } (*(here->B4SOIEePtr) += m * gceeb); if (here->B4SOIrgateMod == 0) { (*(here->B4SOIGgPtr) += m * (gigg + gcggb + Gmin + gIgtotg)); (*(here->B4SOIGdpPtr) += m * (gigd + gcgdb - Gmin + gIgtotd)); (*(here->B4SOIGspPtr) += m * (gcgsb + gigs + gIgtots)); } else if (here->B4SOIrgateMod == 1) /* v3.1 for RF */ { *(here->B4SOIGgPtr) += m * (gigg + gcggb + Gmin + gIgtotg + geltd); *(here->B4SOIGdpPtr) += m * (gigd + gcgdb - Gmin + gIgtotd); *(here->B4SOIGspPtr) += m * (gcgsb + gigs + gIgtots); } else /* v3.1 for RF rgateMod == 2 or 3 */ { *(here->B4SOIGgPtr) += m * (gigg + gcggb + Gmin + gIgtotg - gcrgg); *(here->B4SOIGdpPtr) += m * (gigd + gcgdb - Gmin + gIgtotd - gcrgd); *(here->B4SOIGspPtr) += m * (gcgsb + gigs + gIgtots - gcrgs); } (*(here->B4SOIDPgPtr) += m * ((Gm + gcdgb) + gddpg - Gmin - gIdtotg - gdtotg)); /* v4.0 */ (*(here->B4SOIDPdpPtr) += m * ((gdpr + here->B4SOIgds + gddpdp + RevSum + gcddb) + Gmin - gIdtotd - gdtotd)); /* v4.0 */ (*(here->B4SOIDPspPtr) -= m * ((-gddpsp + here->B4SOIgds + FwdSum - gcdsb) + gIdtots + gdtots)); (*(here->B4SOIDPdPtr) -= m * (gdpr + gdtot)); (*(here->B4SOISPgPtr) += m * (gcsgb - Gm + gsspg - gIstotg - gstotg)); /* v4.0 */ (*(here->B4SOISPdpPtr) -= m * ((here->B4SOIgds - gsspdp + RevSum - gcsdb + gIstotd) + gstotd)); /* v4.0 */ (*(here->B4SOISPspPtr) += m * ((gspr - gstots + here->B4SOIgds + gsspsp + FwdSum + gcssb) + Gmin - gIstots)); /* v4.0 */ (*(here->B4SOISPsPtr) -= m * (gspr + gstot)); (*(here->B4SOIDdPtr) += m * (gdpr + gdtot)); (*(here->B4SOIDdpPtr) -= m * 
(gdpr - gdtotd)); (*(here->B4SOISsPtr) += m * (gspr + gstot)); (*(here->B4SOISspPtr) -= m * (gspr - gstots)); if (here->B4SOIbodyMod == 1) { (*(here->B4SOIBpPtr) -= m * gppp); (*(here->B4SOIPbPtr) += m * gppb); (*(here->B4SOIPpPtr) += m * gppp); } /* v4.1 Ig_agbcp2 stamping */ (*(here->B4SOIGgPtr) += m * gigpg); if (here->B4SOIbodyMod == 1) { (*(here->B4SOIPpPtr) -= m * gigpp); (*(here->B4SOIPgPtr) -= m * gigpg); (*(here->B4SOIGpPtr) += m * gigpp); } else if(here->B4SOIbodyMod == 2) { (*(here->B4SOIBbPtr) -= m * gigpp); (*(here->B4SOIBgPtr) -= m * gigpg); (*(here->B4SOIGbPtr) += m * gigpp); } if (selfheat) { (*(here->B4SOIDPtempPtr) += m * (GmT + gddpT + gcdT)); (*(here->B4SOISPtempPtr) += m * (-GmT + gsspT + gcsT)); (*(here->B4SOIBtempPtr) += m * (gbbT + gcbT - gigT)); (*(here->B4SOIEtempPtr) += m * gceT); (*(here->B4SOIGtempPtr) += m * (gcgT + gigT)); (*(here->B4SOITemptempPtr) += m * (gTtt + 1/pParam->B4SOIrth + gcTt)); (*(here->B4SOITempgPtr) += m * gTtg); (*(here->B4SOITempbPtr) += m * gTtb); (*(here->B4SOITempdpPtr) += m * gTtdp); (*(here->B4SOITempspPtr) += m * gTtsp); /* v3.0 */ if (here->B4SOIsoiMod != 0) /* v3.2 */ (*(here->B4SOITempePtr) += m * gTte); } #else /* OpenMP parallelization: Temporary storage of matrix values into instance storage space. Update to matrix will be done by function B4SOILoadRhsMat() only when all instances have their values stored. 
*/ if (here->B4SOIrgateMod == 1) { here->B4SOI_1 = m * geltd; here->B4SOI_2 = m * geltd; here->B4SOI_3 = m * geltd; } else if (here->B4SOIrgateMod == 2) { here->B4SOI_4 = m * gcrg; here->B4SOI_5 = m * gcrgg; here->B4SOI_6 = m * gcrgd; here->B4SOI_7 = m * gcrgs; here->B4SOI_8 = m * gcrg; if (here->B4SOIsoiMod !=2) /* v3.2 */ here->B4SOI_9 = m * gcrgb; } else if (here->B4SOIrgateMod == 3) { here->B4SOI_10 = m * geltd; here->B4SOI_11 = m * geltd; here->B4SOI_12 = m * geltd; here->B4SOI_13 = m * (geltd + gcrg + gcgmgmb); here->B4SOI_14 = m * (gcrgd + gcgmdb); here->B4SOI_15 = m * gcrgg; here->B4SOI_16 = m * (gcrgs + gcgmsb); here->B4SOI_17 = m * gcgmeb; if (here->B4SOIsoiMod !=2) /* v3.2 */ here->B4SOI_18 = m * gcrgb; here->B4SOI_19 = m * gcdgmb; here->B4SOI_20 = m * gcrg; here->B4SOI_21 = m * gcsgmb; here->B4SOI_22 = m * gcegmb; } /* v3.1 added for RF end*/ /* v3.0 */ if (here->B4SOIsoiMod != 0) /* v3.2 */ { here->B4SOI_23 = m * (Gme + gddpe); here->B4SOI_24 = m * (gsspe - Gme); if (here->B4SOIsoiMod != 2) /* v3.2 */ { here->B4SOI_25 = m * gige; here->B4SOI_26 = m * gige; } } here->B4SOI_27 = m * gcedb; here->B4SOI_28 = m * gcesb; here->B4SOI_29 = m * gcdeb; here->B4SOI_30 = m * gcseb; here->B4SOI_31 = m * gcegb; here->B4SOI_32 = m * gcgeb; /* v3.1 */ if (here->B4SOIsoiMod != 2) /* v3.2 */ { here->B4SOI_33 = m * (gcegb + gcedb + gcesb + gceeb + gcegmb); /* 3.2 bug fix */ /* v3.1 changed GbPtr for RF */ if ((here->B4SOIrgateMod == 0) || (here->B4SOIrgateMod == 1)) (here->B4SOI_34 = m * (-gigb + gcggb + gcgdb + gcgsb + gcgeb - gIgtotb)); else /* v3.1 for rgateMod = 2 or 3 */ here->B4SOI_35 = m * (gigb + gcgbb +gIgtotb - gcrgb); here->B4SOI_36 = m * (-gddpb - Gmbs - gcdbb + gdtotb + gIdtotb ); /* v4.0 */ /* (*(here->B4SOIDPbPtr) -= (-gddpb - Gmbs + gcdgb + gcddb + gcdeb + gcdsb) + gcdgmb + gIdtotb ); */ (here->B4SOI_37 = m * (-gsspb + Gmbs - gcsbb + gstotb + Gmin + gIstotb)); /* v4.0 */ /* (*(here->B4SOISPbPtr) -= (-gsspb + Gmbs + gcsgb + gcsdb + gcseb + gcssb) + gcsgmb 
+ Gmin + gIstotb); */ (here->B4SOI_38 = m * (gbbe + gcbeb)); /* v3.0 */ (here->B4SOI_39 = m * (-gigg + gcbgb + gbbg)); (here->B4SOI_40 = m * (-gigd + gcbdb + gbbdp)); (here->B4SOI_41 = m * (gcbsb + gbbsp - Gmin - gigs)); /* if (!here->B4SOIrbodyMod) */ (here->B4SOI_42 = m * (-gigb + gbbb - gcbgb - gcbdb - gcbsb - gcbeb + Gmin)); /* else (*(here->B4SOIBbPtr) += -gigb - (Giib - Gbpbs) - gcbgb - gcbdb - gcbsb - gcbeb + Gmin) ; */ /* v4.0 */ if (here->B4SOIrbodyMod) { (here->B4SOI_43 = m * (-gcjdbdp - GGjdb)); (here->B4SOI_44 = m * (-gcjsbsp - GGjsb)); (here->B4SOI_45 = m * (-gcjdbdp - GGjdb)); (here->B4SOI_46 = m * (gcjdbdp + GGjdb + here->B4SOIgrbdb)); (here->B4SOI_47 = m * here->B4SOIgrbdb); (here->B4SOI_48 = m * (-gcjsbsp - GGjsb)); (here->B4SOI_49 = m * here->B4SOIgrbsb); (here->B4SOI_50 = m * (gcjsbsp + GGjsb + here->B4SOIgrbsb)); (here->B4SOI_51 = m * here->B4SOIgrbdb); (here->B4SOI_52 = m * here->B4SOIgrbsb); (here->B4SOI_53 = m * (here->B4SOIgrbsb + here->B4SOIgrbdb)); } if (model->B4SOIrdsMod) { (here->B4SOI_54 = m * gdtotb); (here->B4SOI_55 = m * gstotb); } } /* v3.1 */ if (model->B4SOIrdsMod) { (here->B4SOI_56 = m * gdtotg); (here->B4SOI_57 = m * gdtots); (here->B4SOI_58 = m * gstotd); (here->B4SOI_59 = m * gstotg); } (here->B4SOI_60 = m * gceeb); if (here->B4SOIrgateMod == 0) { (here->B4SOI_61 = m * (gigg + gcggb + Gmin + gIgtotg)); (here->B4SOI_62 =m * ( gigd + gcgdb - Gmin + gIgtotd)); (here->B4SOI_63 = m * (gcgsb + gigs + gIgtots)); } else if (here->B4SOIrgateMod == 1) /* v3.1 for RF */ { here->B4SOI_64 = m * (gigg + gcggb + Gmin + gIgtotg + geltd); here->B4SOI_65 = m * (gigd + gcgdb - Gmin + gIgtotd); here->B4SOI_66 = m * (gcgsb + gigs + gIgtots); } else /* v3.1 for RF rgateMod == 2 or 3 */ { here->B4SOI_67 = m * (gigg + gcggb + Gmin + gIgtotg - gcrgg); here->B4SOI_68 = m * (gigd + gcgdb - Gmin + gIgtotd - gcrgd); here->B4SOI_69 = m * (gcgsb + gigs + gIgtots - gcrgs); } (here->B4SOI_70 = m * ((Gm + gcdgb) + gddpg - Gmin - gIdtotg - gdtotg)); /* v4.0 */ 
(here->B4SOI_71 = m * ((gdpr + here->B4SOIgds + gddpdp + RevSum + gcddb) + Gmin - gIdtotd - gdtotd)); /* v4.0 */ (here->B4SOI_72 = m * ((-gddpsp + here->B4SOIgds + FwdSum - gcdsb) + gIdtots + gdtots)); (here->B4SOI_73 = m * (gdpr + gdtot)); (here->B4SOI_74 = m * (gcsgb - Gm + gsspg - gIstotg - gstotg)); /* v4.0 */ (here->B4SOI_75 = m * ((here->B4SOIgds - gsspdp + RevSum - gcsdb + gIstotd) + gstotd)); /* v4.0 */ (here->B4SOI_76 = m * ((gspr - gstots + here->B4SOIgds + gsspsp + FwdSum + gcssb) + Gmin - gIstots)); /* v4.0 */ (here->B4SOI_77 = m * (gspr + gstot)); (here->B4SOI_78 = m * (gdpr + gdtot)); (here->B4SOI_79 = m * (gdpr - gdtotd)); (here->B4SOI_80 = m * (gspr + gstot)); (here->B4SOI_81 = m * (gspr - gstots)); if (here->B4SOIbodyMod == 1) { (here->B4SOI_82 = m * gppp); (here->B4SOI_83 = m * gppb); (here->B4SOI_84 = m * gppp); } /* v4.1 Ig_agbcp2 stamping */ (here->B4SOI_85 = m * gigpg); /* FIXME m or not m ?? h_vogt */ if (here->B4SOIbodyMod == 1) { (here->B4SOI_86 = m * gigpp); (here->B4SOI_87 = m * gigpg); (here->B4SOI_88 = m * gigpp); } else if(here->B4SOIbodyMod == 2) { (here->B4SOI_89 = m * gigpp); (here->B4SOI_90 = m * gigpg); (here->B4SOI_91 = m * gigpp); } if (selfheat) { (here->B4SOI_92 = m * (GmT + gddpT + gcdT)); (here->B4SOI_93 = m * (-GmT + gsspT + gcsT)); (here->B4SOI_94 = m * (gbbT + gcbT - gigT)); (here->B4SOI_95 = m * gceT); (here->B4SOI_96 = m * (gcgT + gigT)); (here->B4SOI_97 = m * (gTtt + 1/pParam->B4SOIrth + gcTt)); (here->B4SOI_98 = m * gTtg); (here->B4SOI_99 = m * gTtb); (here->B4SOI_100 = m * gTtdp); (here->B4SOI_101 = m * gTtsp); /* v3.0 */ if (here->B4SOIsoiMod != 0) /* v3.2 */ (here->B4SOI_102 = m * gTte); } #endif if (here->B4SOIdebugMod != 0) { *(here->B4SOIVbsPtr) += 1; *(here->B4SOIIdsPtr) += 1; *(here->B4SOIIcPtr) += 1; *(here->B4SOIIbsPtr) += 1; *(here->B4SOIIbdPtr) += 1; *(here->B4SOIIiiPtr) += 1; *(here->B4SOIIgPtr) += 1; *(here->B4SOIGiggPtr) += 1; *(here->B4SOIGigdPtr) += 1; *(here->B4SOIGigbPtr) += 1; 
/* Tail of the debugMod stamp section of the serial instance loop:
 * a unit diagonal entry keeps each auxiliary debug row non-singular. */
            *(here->B4SOIIgidlPtr) += 1;
            *(here->B4SOIItunPtr) += 1;
            *(here->B4SOIIbpPtr) += 1;
            *(here->B4SOICbgPtr) += 1;
            *(here->B4SOICbbPtr) += 1;
            *(here->B4SOICbdPtr) += 1;
            *(here->B4SOIQbfPtr) += 1;
            *(here->B4SOIQjsPtr) += 1;
            *(here->B4SOIQjdPtr) += 1;
        }

line1000:  ;

#ifndef USE_OMP
    }  /* End of Mosfet Instance */
    }  /* End of Model Instance */
#endif

    return(OK);
}

#ifdef USE_OMP
/* OpenMP parallelization: Update of right hand side and matrix values
   from instance temporary storage.  The parallel load pass only cached
   its contributions in per-instance slots (B4SOINode_* for the RHS,
   B4SOI_* for the Jacobian); this serial pass scatters them into
   ckt->CKTrhs and the shared sparse matrix.  Update to matrix is done
   only when all instances of this model have their values calculated
   and stored, thus there is no further synchronisation required. */
void B4SOILoadRhsMat(GENmodel *inModel, CKTcircuit *ckt)
{
    int InstCount, idx;
    B4SOIinstance **InstArray;
    B4SOIinstance *here;
    B4SOImodel *model = (B4SOImodel*)inModel;

    InstArray = model->B4SOIInstanceArray;
    InstCount = model->B4SOIInstCount;

    for(idx = 0; idx < InstCount; idx++) {
        here = InstArray[idx];
        model = B4SOImodPtr(here);  /* instances may belong to different models */

        /* Update b for Ax = b */
        /* v3.1 */
        /* v3.1 added ceqgcrg for RF */
        (*(ckt->CKTrhs + here->B4SOIgNode) -= here->B4SOINode_1);
        /* v3.1 added ceqgcrg for RF end */
        (*(ckt->CKTrhs + here->B4SOIdNodePrime) += here->B4SOINode_2);
        if (!here->B4SOIrbodyMod)
        {
            (*(ckt->CKTrhs + here->B4SOIsNodePrime) += here->B4SOINode_3); /* v4.0 */
        }
        else
        {   /* v4.0 */
            (*(ckt->CKTrhs + here->B4SOIsNodePrime) += here->B4SOINode_4);
        }
        (*(ckt->CKTrhs + here->B4SOIeNode) -= here->B4SOINode_5);
        if (here->B4SOIrgateMod == 2)
            (*(ckt->CKTrhs + here->B4SOIgNodeExt) -= here->B4SOINode_6);
        else if (here->B4SOIrgateMod == 3)
            (*(ckt->CKTrhs + here->B4SOIgNodeMid) -= here->B4SOINode_7);
        if (here->B4SOIbodyMod == 1)
        {
            (*(ckt->CKTrhs + here->B4SOIpNode) += here->B4SOINode_8);
        }
        if ( here->B4SOIsoiMod != 2 )
        {
            if (!here->B4SOIrbodyMod)
                (*(ckt->CKTrhs + here->B4SOIbNode) -= here->B4SOINode_9);
            else /* v4.0 */
            {
                (*(ckt->CKTrhs + here->B4SOIdbNode) -= here->B4SOINode_10);
                (*(ckt->CKTrhs + here->B4SOIbNode) -= here->B4SOINode_11);
                (*(ckt->CKTrhs + here->B4SOIsbNode) -= here->B4SOINode_12);
            }
        }
        /* B4SOINode_sh is the selfheat flag saved by the parallel pass */
        if (here->B4SOINode_sh)
        {
            (*(ckt->CKTrhs + here->B4SOItempNode) -= here->B4SOINode_13);
        }
        if (model->B4SOIrdsMod)
        {
            (*(ckt->CKTrhs + here->B4SOIdNode) -= here->B4SOINode_14);
            (*(ckt->CKTrhs + here->B4SOIsNode) += here->B4SOINode_15);
        }

        /* Debug-node RHS entries mirror the serial (non-OMP) path. */
        if (here->B4SOIdebugMod != 0)
        {
            *(ckt->CKTrhs + here->B4SOIvbsNode) = here->B4SOIvbseff;
            *(ckt->CKTrhs + here->B4SOIidsNode) = FLOG(here->B4SOIids);
            *(ckt->CKTrhs + here->B4SOIicNode) = FLOG(here->B4SOIic);
            *(ckt->CKTrhs + here->B4SOIibsNode) = FLOG(here->B4SOIibs);
            *(ckt->CKTrhs + here->B4SOIibdNode) = FLOG(here->B4SOIibd);
            *(ckt->CKTrhs + here->B4SOIiiiNode) = FLOG(here->B4SOIiii);
            *(ckt->CKTrhs + here->B4SOIigNode) = here->B4SOIig;
            *(ckt->CKTrhs + here->B4SOIgiggNode) = here->B4SOIgigg;
            *(ckt->CKTrhs + here->B4SOIgigdNode) = here->B4SOIgigd;
            *(ckt->CKTrhs + here->B4SOIgigbNode) = here->B4SOIgigb;
            *(ckt->CKTrhs + here->B4SOIigidlNode) = here->B4SOIigidl;
            *(ckt->CKTrhs + here->B4SOIitunNode) = here->B4SOIitun;
            *(ckt->CKTrhs + here->B4SOIibpNode) = here->B4SOIibp;
            *(ckt->CKTrhs + here->B4SOIcbbNode) = here->B4SOIcbb;
            *(ckt->CKTrhs + here->B4SOIcbdNode) = here->B4SOIcbd;
            *(ckt->CKTrhs + here->B4SOIcbgNode) = here->B4SOIcbg;
            *(ckt->CKTrhs + here->B4SOIqbfNode) = here->B4SOIqbf;
            *(ckt->CKTrhs + here->B4SOIqjsNode) = here->B4SOIqjs;
            *(ckt->CKTrhs + here->B4SOIqjdNode) = here->B4SOIqjd;
        }

        /* Jacobian stamps: cached slots B4SOI_1..B4SOI_102 are scattered
         * in exactly the order the serial path stamps them. */
        if (here->B4SOIrgateMod == 1)
        {
            *(here->B4SOIGEgePtr) += here->B4SOI_1;
            *(here->B4SOIGgePtr) -= here->B4SOI_2;
            *(here->B4SOIGEgPtr) -= here->B4SOI_3;
        }
        else if (here->B4SOIrgateMod == 2)
        {
            *(here->B4SOIGEgePtr) += here->B4SOI_4;
            *(here->B4SOIGEgPtr) += here->B4SOI_5;
            *(here->B4SOIGEdpPtr) += here->B4SOI_6;
            *(here->B4SOIGEspPtr) += here->B4SOI_7;
            *(here->B4SOIGgePtr) -= here->B4SOI_8;
            if (here->B4SOIsoiMod !=2) /* v3.2 */
                *(here->B4SOIGEbPtr) += here->B4SOI_9;
        }
        else if (here->B4SOIrgateMod == 3)
        {
            *(here->B4SOIGEgePtr) += here->B4SOI_10;
            *(here->B4SOIGEgmPtr) -= here->B4SOI_11;
            *(here->B4SOIGMgePtr) -= here->B4SOI_12;
            *(here->B4SOIGMgmPtr) += here->B4SOI_13;
            *(here->B4SOIGMdpPtr) += here->B4SOI_14;
            *(here->B4SOIGMgPtr) += here->B4SOI_15;
            *(here->B4SOIGMspPtr) += here->B4SOI_16;
            *(here->B4SOIGMePtr) += here->B4SOI_17;
            if (here->B4SOIsoiMod !=2) /* v3.2 */
                *(here->B4SOIGMbPtr) += here->B4SOI_18;
            *(here->B4SOIDPgmPtr) += here->B4SOI_19;
            *(here->B4SOIGgmPtr) -= here->B4SOI_20;
            *(here->B4SOISPgmPtr) += here->B4SOI_21;
            *(here->B4SOIEgmPtr) += here->B4SOI_22;
        }
        /* v3.1 added for RF end*/

        /* v3.0 */
        if (here->B4SOIsoiMod != 0) /* v3.2 */
        {
            (*(here->B4SOIDPePtr) += here->B4SOI_23);
            (*(here->B4SOISPePtr) += here->B4SOI_24);
            if (here->B4SOIsoiMod != 2) /* v3.2 */
            {
                *(here->B4SOIGePtr) += here->B4SOI_25;
                *(here->B4SOIBePtr) -= here->B4SOI_26;
            }
        }

        *(here->B4SOIEdpPtr) += here->B4SOI_27;
        *(here->B4SOIEspPtr) += here->B4SOI_28;
        *(here->B4SOIDPePtr) += here->B4SOI_29;
        *(here->B4SOISPePtr) += here->B4SOI_30;
        *(here->B4SOIEgPtr) += here->B4SOI_31;
        *(here->B4SOIGePtr) += here->B4SOI_32;

        /* v3.1 */
        if (here->B4SOIsoiMod != 2) /* v3.2 */
        {
            (*(here->B4SOIEbPtr) -= here->B4SOI_33); /* 3.2 bug fix */
            /* v3.1 changed GbPtr for RF */
            if ((here->B4SOIrgateMod == 0) || (here->B4SOIrgateMod == 1))
                (*(here->B4SOIGbPtr) -= here->B4SOI_34);
            else /* v3.1 for rgateMod = 2 or 3 */
                *(here->B4SOIGbPtr) += here->B4SOI_35;
            (*(here->B4SOIDPbPtr) -= here->B4SOI_36); /* v4.0 */
            /* (*(here->B4SOIDPbPtr) -= (-gddpb - Gmbs + gcdgb + gcddb + gcdeb + gcdsb) + gcdgmb + gIdtotb ); */
            (*(here->B4SOISPbPtr) -= here->B4SOI_37); /* v4.0 */
            /* (*(here->B4SOISPbPtr) -= (-gsspb + Gmbs + gcsgb + gcsdb + gcseb + gcssb) + gcsgmb + Gmin + gIstotb); */
            (*(here->B4SOIBePtr) += here->B4SOI_38);
            /* v3.0 */
            (*(here->B4SOIBgPtr) += here->B4SOI_39);
            (*(here->B4SOIBdpPtr) += here->B4SOI_40);
            (*(here->B4SOIBspPtr) += here->B4SOI_41);
            /* if (!here->B4SOIrbodyMod) */
            (*(here->B4SOIBbPtr) += here->B4SOI_42);
            /* else (*(here->B4SOIBbPtr) += -gigb - (Giib - Gbpbs) - gcbgb - gcbdb - gcbsb - gcbeb + Gmin) ; */

            /* v4.0 */
            if (here->B4SOIrbodyMod)
            {
                (*(here->B4SOIDPdbPtr) += here->B4SOI_43);
                (*(here->B4SOISPsbPtr) += here->B4SOI_44);
                (*(here->B4SOIDBdpPtr) += here->B4SOI_45);
                (*(here->B4SOIDBdbPtr) += here->B4SOI_46);
                (*(here->B4SOIDBbPtr) -= here->B4SOI_47);
                (*(here->B4SOISBspPtr) += here->B4SOI_48);
                (*(here->B4SOISBbPtr) -= here->B4SOI_49);
                (*(here->B4SOISBsbPtr) += here->B4SOI_50);
                (*(here->B4SOIBdbPtr) -= here->B4SOI_51);
                (*(here->B4SOIBsbPtr) -= here->B4SOI_52);
                (*(here->B4SOIBbPtr) += here->B4SOI_53);
            }
            if (model->B4SOIrdsMod)
            {
                (*(here->B4SOIDbPtr) += here->B4SOI_54);
                (*(here->B4SOISbPtr) += here->B4SOI_55);
            }
        }
        /* v3.1 */

        if (model->B4SOIrdsMod)
        {
            (*(here->B4SOIDgPtr) += here->B4SOI_56);
            (*(here->B4SOIDspPtr) += here->B4SOI_57);
            (*(here->B4SOISdpPtr) += here->B4SOI_58);
            (*(here->B4SOISgPtr) += here->B4SOI_59);
        }

        (*(here->B4SOIEePtr) += here->B4SOI_60);

        if (here->B4SOIrgateMod == 0)
        {
            (*(here->B4SOIGgPtr) += here->B4SOI_61);
            (*(here->B4SOIGdpPtr) += here->B4SOI_62);
            (*(here->B4SOIGspPtr) += here->B4SOI_63);
        }
        else if (here->B4SOIrgateMod == 1) /* v3.1 for RF */
        {
            *(here->B4SOIGgPtr) += here->B4SOI_64;
            *(here->B4SOIGdpPtr) += here->B4SOI_65;
            *(here->B4SOIGspPtr) += here->B4SOI_66;
        }
        else /* v3.1 for RF rgateMod == 2 or 3 */
        {
            *(here->B4SOIGgPtr) += here->B4SOI_67;
            *(here->B4SOIGdpPtr) += here->B4SOI_68;
            *(here->B4SOIGspPtr) += here->B4SOI_69;
        }

        (*(here->B4SOIDPgPtr) += here->B4SOI_70); /* v4.0 */
        (*(here->B4SOIDPdpPtr) += here->B4SOI_71); /* v4.0 */
        (*(here->B4SOIDPspPtr) -= here->B4SOI_72);
        (*(here->B4SOIDPdPtr) -= here->B4SOI_73);
        (*(here->B4SOISPgPtr) += here->B4SOI_74); /* v4.0 */
        (*(here->B4SOISPdpPtr) -= here->B4SOI_75); /* v4.0 */
        (*(here->B4SOISPspPtr) += here->B4SOI_76); /* v4.0 */
        (*(here->B4SOISPsPtr) -= here->B4SOI_77);
        (*(here->B4SOIDdPtr) += here->B4SOI_78);
        (*(here->B4SOIDdpPtr) -= here->B4SOI_79);
        (*(here->B4SOISsPtr) += here->B4SOI_80);
        (*(here->B4SOISspPtr) -= here->B4SOI_81);

        if (here->B4SOIbodyMod == 1)
        {
            (*(here->B4SOIBpPtr) -= here->B4SOI_82);
            (*(here->B4SOIPbPtr) += here->B4SOI_83);
            (*(here->B4SOIPpPtr) += here->B4SOI_84);
        }

        /* v4.1 Ig_agbcp2 stamping */
        (*(here->B4SOIGgPtr) += here->B4SOI_85); /* FIXME m or not m ?? h_vogt */
        if (here->B4SOIbodyMod == 1)
        {
            (*(here->B4SOIPpPtr) -= here->B4SOI_86);
            (*(here->B4SOIPgPtr) -= here->B4SOI_87);
            (*(here->B4SOIGpPtr) += here->B4SOI_88);
        }
        else if(here->B4SOIbodyMod == 2)
        {
            (*(here->B4SOIBbPtr) -= here->B4SOI_89);
            (*(here->B4SOIBgPtr) -= here->B4SOI_90);
            (*(here->B4SOIGbPtr) += here->B4SOI_91);
        }

        if (here->B4SOINode_sh) /* selfheat */
        {
            (*(here->B4SOIDPtempPtr) += here->B4SOI_92);
            (*(here->B4SOISPtempPtr) += here->B4SOI_93);
            (*(here->B4SOIBtempPtr) += here->B4SOI_94);
            (*(here->B4SOIEtempPtr) +=here->B4SOI_95);
            (*(here->B4SOIGtempPtr) += here->B4SOI_96);
            (*(here->B4SOITemptempPtr) += here->B4SOI_97);
            (*(here->B4SOITempgPtr) += here->B4SOI_98);
            (*(here->B4SOITempbPtr) += here->B4SOI_99);
            (*(here->B4SOITempdpPtr) += here->B4SOI_100);
            (*(here->B4SOITempspPtr) += here->B4SOI_101);
            /* v3.0 */
            if (here->B4SOIsoiMod != 0) /* v3.2 */
                (*(here->B4SOITempePtr) += here->B4SOI_102);
        }

        /* Debug-node unit diagonal stamps (same as the serial path). */
        if (here->B4SOIdebugMod != 0)
        {
            *(here->B4SOIVbsPtr) += 1;
            *(here->B4SOIIdsPtr) += 1;
            *(here->B4SOIIcPtr) += 1;
            *(here->B4SOIIbsPtr) += 1;
            *(here->B4SOIIbdPtr) += 1;
            *(here->B4SOIIiiPtr) += 1;
            *(here->B4SOIIgPtr) += 1;
            *(here->B4SOIGiggPtr) += 1;
            *(here->B4SOIGigdPtr) += 1;
            *(here->B4SOIGigbPtr) += 1;
            *(here->B4SOIIgidlPtr) += 1;
            *(here->B4SOIItunPtr) += 1;
            *(here->B4SOIIbpPtr) += 1;
            *(here->B4SOICbgPtr) += 1;
            *(here->B4SOICbbPtr) += 1;
            *(here->B4SOICbdPtr) += 1;
            *(here->B4SOIQbfPtr) += 1;
            *(here->B4SOIQjsPtr) += 1;
            *(here->B4SOIQjdPtr) += 1;
        }
    }
}
#endif
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "../profiler/profiler.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 *
 * Abstract interface used by the KVStore to reduce gradients pushed from
 * several devices and to broadcast the result back to them.
 */
class Comm {
 public:
  Comm() {
    // All concrete Comms stage data through page-locked host memory.
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() {}
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const mxnet::TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
   *        where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   *        perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  Context pinned_ctx_;                        // page-locked CPU staging context
  std::shared_ptr<GradientCompression> gc_;   // optional gradient compressor
};

/**
 * \brief an implementation of Comm that first copy data to CPU memory, and then
 *        reduce there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() {}

  void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape,
            int type = mshadow::kFloat32) override {
    // Delayed allocation - the dense merged buffer might not be used at all if push()
    // only sees sparse arrays
    bool delay_alloc = true;
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
  }

  const NDArray& Reduce(int key, const
std::vector<NDArray>& src, int priority) override {
    auto& buf = merge_buf_[key];
    const auto stype = src[0].storage_type();
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (stype == kDefaultStorage) {
        return src[0];
      } else {
        // With 'local' kvstore, we could store the weight on CPU while compute
        // the gradient on GPU when the weight is extremely large.
        // To avoid copying the weight to the same context of the gradient,
        // we always copy the gradient to merged buf.
        NDArray& merged = buf.merged_buf(stype);
        CopyFromTo(src[0], &merged, priority);
        return merged;
      }
    }

    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf_merged, priority);
      reduce[0] = buf_merged;

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size() - 1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate copy buffer
          buf.copy_buf[j] = NDArray(src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
          << "Storage type mismatch detected. " << stype << "(src) vs. "
          << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i - 1]), priority);
        reduce[i] = buf.copy_buf[i - 1];
        const_vars[i - 1] = reduce[i].var();
      }
      // Sum all staged copies into reduce[0] (== buf_merged) on the engine.
      Engine::Get()->PushAsync(
          [reduce, this](RunContext rctx, Engine::CallbackOnStart on_start,
                         Engine::CallbackOnComplete on_complete) {
            on_start();
            ReduceSumCPU(reduce);
            on_complete();
          },
          Context::CPU(), const_vars, {reduce[0].var()},
          FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    } else {
      // sparse reduce
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(src[0].storage_type(), src[0].shape(),
                                    pinned_ctx_, true, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
          << "Storage type mismatch detected. " << stype << "(src) vs. "
          << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
          ResourceRequest(ResourceRequest::kTempSpace));
      // Serial path kept for benchmarking (MXNET_KVSTORE_SERIAL_PUSH);
      // the default path uses the generic element-wise sum kernel.
      Engine::Get()->PushAsync(
          [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnStart on_start,
                                          Engine::CallbackOnComplete on_complete) {
            on_start();
            NDArray out = buf_merged;
            is_serial_push_ ?
                ReduceSumCPUExSerial(reduce, &out)
                : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
            on_complete();
          },
          Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
          FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    }

    return buf_merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // First copy data to pinned_ctx, then broadcast.
      // Note that kv.init initializes the data on pinned_ctx.
      // This branch indicates push() with ndarrays on gpus were called,
      // and the source is copied to gpu ctx.
      // Also indicates that buffers are already initialized during push().
      auto& buf = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf, priority);
      for (auto d : dst) CopyFromTo(buf, d, priority);
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
        << "BroadcastRowSparse with src on gpu context not supported";
    for (const auto& dst_kv : dst) {
      NDArray* out = dst_kv.first;
      NDArray row_id = dst_kv.second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
          << "BroadcastRowSparse with row_indices on gpu context not supported";
      // retain according to unique indices
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      // Retain directly into *out when safe, otherwise into a fresh CPU temp.
      NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
                  src.dtype(), src.aux_types());
      if (!is_diff_var) {
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }
      Engine::Get()->PushAsync(
          [=](RunContext rctx, Engine::CallbackOnStart on_start,
              Engine::CallbackOnComplete on_complete) {
            on_start();
            const TBlob& indices = row_id.data();
            NDArray temp = retained_cpu;  // get rid the of const qualifier
            op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                  src, indices, kWriteTo, &temp);
            on_complete();
          },
          Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
          FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if retained_cpu == out, CopyFromTo will ignore the copy operation
      CopyFromTo(retained_cpu, out, priority);
    }
  }

 private:
  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray>& in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
// Serial CPU reduction (sum) of several row-sparse NDArrays into *out.
// Visible strategy: gather the row indices of every non-empty input, sort and
// deduplicate them to get the union of non-zero rows, allocate the output for
// exactly that many rows, then accumulate each input's matching rows.
// NOTE(review): the per-input `offsets` cursors only ever advance, which
// assumes each input's index array is sorted ascending — TODO confirm with
// the row_sparse format contract.
inline void ReduceSumCPUExSerial(const std::vector<NDArray>& in, NDArray* out) {
  using namespace rowsparse;
  using namespace mshadow;
  auto stype = out->storage_type();
  CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
  size_t total_num_rows = 0;
  size_t num_in = in.size();
  // skip the ones with empty indices and values
  std::vector<bool> skip(num_in, false);
  // the values tensor of the inputs
  MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
    MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
      std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
      std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
      // offset to the values tensor of all inputs
      std::vector<size_t> offsets(num_in, 0);
      std::vector<size_t> num_rows(num_in, 0);
      // First pass: record each input's stored-row count and grab flat views
      // of its values / indices; inputs with no storage are marked skipped.
      for (size_t i = 0; i < num_in; i++) {
        if (!in[i].storage_initialized()) {
          skip[i] = true;
          continue;
        }
        auto size = in[i].aux_shape(kIdx).Size();
        num_rows[i] = size;
        total_num_rows += size;
        in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
        in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
      }
      std::vector<IType> indices;
      indices.reserve(total_num_rows);
      // gather indices from all inputs
      for (size_t i = 0; i < num_in; i++) {
        for (size_t j = 0; j < num_rows[i]; j++) {
          indices.emplace_back(in_indices[i][j]);
        }
      }
      CHECK_EQ(indices.size(), total_num_rows);
      // dedup indices
      std::sort(indices.begin(), indices.end());
      indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
      // the one left are unique non-zero rows
      size_t nnr = indices.size();
      // allocate memory for output
      out->CheckAndAlloc({Shape1(nnr)});
      auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
      auto val_data = out->data().FlatTo2D<cpu, DType>();
      // Second pass: for each unique row, copy the first contributing input's
      // row (`zeros` tracks whether the slot is still uninitialized) and add
      // every subsequent contributor on top.
      for (size_t i = 0; i < nnr; i++) {
        // copy indices back
        idx_data[i] = indices[i];
        bool zeros = true;
        for (size_t j = 0; j < num_in; j++) {
          if (skip[j]) continue;
          size_t offset = offsets[j];
          if (offset < num_rows[j]) {
            if (indices[i] == in_indices[j][offset]) {
              if (zeros) {
                Copy(val_data[i],
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template <typename DType> inline static void ReduceSumCPU(const std::vector<DType*>& dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i += 4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i + 2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i + 2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i + 3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template <typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged 
value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() {} void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), 
buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. 
for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size() - 1); const std::string profiler_scope = profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "comm_dev:"; for (size_t i = 0; i < src.size() - 1; ++i) { buf.copy_buf[i] = NDArray(buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); buf.copy_buf[i].AssignStorageInfo(profiler_scope, "copy_buf"); } } for (size_t i = 0; i < src.size() - 1; ++i) { CopyFromTo(src[i + 1], &(buf.copy_buf[i]), priority); reduce[i + 1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); const std::string profiler_scope = profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "comm_dev:"; for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.copy_buf[i].AssignStorageInfo(profiler_scope, "copy_buf"); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i].AssignStorageInfo(profiler_scope, "residual"); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(mxnet::TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_recv_buf[i].AssignStorageInfo(profiler_scope, "compressed_recv_buf"); buf.compressed_send_buf[i] = NDArray(mxnet::TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); 
buf.compressed_send_buf[i].AssignStorageInfo(profiler_scope, "compressed_send_buf"); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = 
out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray( kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnStart on_start, Engine::CallbackOnComplete on_complete) { on_start(); const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>( rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>( rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
          priority,
          "KVStoreSparseRetain");
      // if retained_gpu aliases *out this copy is skipped by CopyFromTo
      CopyFromTo(retained_gpu, out, priority);
    }
  }

  // (key, shape, dtype) as registered via Init()
  using KeyAttrs = std::tuple<int, mxnet::TShape, int>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // Greedy balancing: place the largest keys first, each on the device that
    // currently holds the fewest buffered elements.
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(),
              [](const KeyAttrs& a, const KeyAttrs& b) {
                return std::get<1>(a).Size() > std::get<1>(b).Size();
              });
    // dev_id -> (context, total elements already assigned to it)
    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    const std::string profiler_scope =
        profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "kvstore:comm_dev:";
    for (auto& sorted_key_attr : sorted_key_attrs_) {
      const int key = std::get<0>(sorted_key_attr);
      const mxnet::TShape& shape = std::get<1>(sorted_key_attr);
      const int type = std::get<2>(sorted_key_attr);
      auto& buf = merge_buf_[key];
      // pick the least-loaded device for this key's merge buffer
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto& ctx_info_kv : ctx_info) {
        size_t size = ctx_info_kv.second.second;
        if (size <= min_size) {
          ctx = ctx_info_kv.second.first;
          min_size = size;
        }
      }
      // Delayed allocation - as the dense merged buffer might not be used at all if push()
      // only sees sparse arrays
      if (buf.merged.is_none()) {
        bool delay_alloc = true;
        buf.merged = NDArray(shape, ctx, delay_alloc, type);
        buf.merged.AssignStorageInfo(profiler_scope, "merge_buf_" + std::to_string(key));
      }
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

 private:
  // Enable CUDA peer-to-peer access between every pair of GPU contexts in
  // `devs`; compiled out when MXNET_USE_CUDA is off.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    // p2p[i * n + j] == 1 iff direct access i -> j was enabled
    std::vector<int> p2p(n * n);
    for (int i = 0; i < n; ++i) {
      // Restores active device to what it was before EnableP2P
      mxnet::common::cuda::DeviceStore device_store(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i * n + j] = 1; } } } } if (enabled != n * (n - 1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n * (n - 1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i * n + j] ? 'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // 
namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
/* ==== libimagequant.c (second concatenated source file) ==== */
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int max_colors, 
max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps, use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; bool use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; bool use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table *acht; double gamma; f_pixel 
fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); char buf[required_space]; va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context, buf, context->log_callback_user_info); } } LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg) { if (attr->log_callback) { attr->log_callback(attr, msg, attr->log_callback_user_info); } } LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr) { if (attr->log_flush_callback) { attr->log_flush_callback(attr, attr->log_flush_callback_user_info); } } LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent) { return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info); } LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent) { return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info); } #if USE_SSE inline static bool is_sse_available() { #if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64)) return true; #elif _MSC_VER int info[4]; __cpuid(info, 1); /* bool is implemented as a built-in type of size 1 in MSVC */ return info[3] & (1<<26) ? 
true : false; #else int a,b,c,d; cpuid(1, a, b, c, d); return d & (1<<25); // edx bit 25 is set when SSE is present #endif } #endif /* make it clear in backtrace when user-supplied handle points to invalid memory */ NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header); LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header) { if (!user_supplied_pointer) { return false; } if (user_supplied_pointer->magic_header == liq_freed_magic) { fprintf(stderr, "%s used after being freed", expected_magic_header); // this is not normal error handling, this is programmer error that should crash the program. // program cannot safely continue if memory has been used after it's been freed. // abort() is nasty, but security vulnerability may be worse. abort(); } return user_supplied_pointer->magic_header == expected_magic_header; } NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer); LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer) { if (!pointer) { return false; } // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not. // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read. 
char test_access = *((volatile char *)pointer);
    return test_access || true;
}

/* Log an error message through the attr's log callback, if one is set. */
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    liq_verbose_printf(attr, " error: %s", msg);
}

/* Map a 0..100 quality setting to a target mean-square-error budget.
   0 means "any error allowed" (MAX_DIFF) and 100 means lossless (0). */
static double quality_to_mse(long quality)
{
    if (quality == 0) {
        return MAX_DIFF;
    }
    if (quality == 100) {
        return 0;
    }

    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
    return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}

/* Inverse of quality_to_mse(): find the highest quality whose MSE budget
   still covers `mse`. Linear scan from 100 down; returns 0 if none match. */
static unsigned int mse_to_quality(double mse)
{
    for(int i=100; i > 0; i--) {
        if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
            return i;
        }
    }
    return 0;
}

/** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse) {
    return mse * 65536.0/6.0;
}

/* Set the acceptable quality range; converts both ends to MSE budgets.
   `minimum` > `target`, or anything outside 0..100, is rejected. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;

    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}

/* Returns the minimum-quality setting (derived back from max_mse), or -1 on a bad handle. */
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->max_mse);
}

/* Returns the target-quality setting (derived back from target_mse), or -1 on a bad handle. */
LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->target_mse);
}

/* Set the maximum palette size; must be 2..256. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;

    attr->max_colors = colors;
    return LIQ_OK;
}

LIQ_EXPORT
LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->max_colors; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE; attr->min_posterization_output = bits; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->min_posterization_output; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE; unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2; attr->kmeans_iterations = iterations; attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed)); attr->feedback_loop_trials = MAX(56-9*speed, 0); attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed); attr->min_posterization_input = (speed >= 8) ? 1 : 0; attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map; attr->speed = speed; attr->progress_stage1 = attr->use_contrast_maps ? 
20 : 8; if (attr->feedback_loop_trials < 2) attr->progress_stage1 += 30; attr->progress_stage3 = 50 / (1+speed); attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->speed; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } res->gamma = gamma; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE; attr->min_opaque_val = (double)min/255.0; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return MIN(255.f, 256.f * attr->min_opaque_val); } LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->last_index_transparent = !!is_last; } LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->progress_callback = callback; attr->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return; result->progress_callback = callback; result->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; 
liq_verbose_printf_flush(attr);
    attr->log_callback = callback;
    attr->log_callback_user_info = user_info;
}

/* Install a callback invoked when buffered log output should be flushed. */
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->log_flush_callback = callback;
    attr->log_flush_callback_user_info = user_info;
}

/* Create an attr with the default (16-byte aligned) allocator. */
LIQ_EXPORT liq_attr* liq_attr_create()
{
    return liq_attr_create_with_allocator(NULL, NULL);
}

/* Destroy an attr: flush pending log output, poison the magic header so a
   later use-after-free is caught, then release with the attr's own free(). */
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }

    liq_verbose_printf_flush(attr);

    attr->magic_header = liq_freed_magic;
    attr->free(attr);
}

/* Shallow-copy an attr using the original's allocator; callbacks and
   user-info pointers are copied as-is. */
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }

    liq_attr *attr = orig->malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = *orig;
    return attr;
}

/* 16-byte-aligned malloc: over-allocates by 16 and shifts the returned
   pointer up to the next 16-byte boundary, always leaving at least one byte
   before it so ptr[-1] can record the shift (XOR 0x59 as a light sanity tag)
   for liq_aligned_free() to undo. */
static void *liq_aligned_malloc(size_t size)
{
    unsigned char *ptr = malloc(size + 16);
    if (!ptr) {
        return NULL;
    }

    uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
    ptr += offset;
    assert(0 == (((uintptr_t)ptr) & 15));
    ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
    return ptr;
}

/* Counterpart of liq_aligned_malloc(): recover the stored shift from
   ptr[-1] and free the original malloc'd pointer. */
LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
    unsigned char *ptr = inptr;
    size_t offset = ptr[-1] ^ 0x59;
    assert(offset > 0 && offset <= 16);
    free(ptr - offset);
}

/* Create an attr with custom allocation functions. Both callbacks must be
   given together, or both omitted (then the aligned default pair is used).
   Returns NULL on OOM, mismatched callbacks, or (with USE_SSE) missing SSE. */
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL;
    }
#endif

    if (!custom_malloc && !custom_free) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    } else if (!custom_malloc != !custom_free) {
        return NULL; // either specify both or none
    }

    liq_attr *attr = custom_malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
.min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha) .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles. .target_mse = 0, .max_mse = MAX_DIFF, }; liq_set_speed(attr, 3); return attr; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED; float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return LIQ_OK; } LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, f_pixel color) { if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED; hist->fixed_colors[hist->fixed_colors_count++] = color; return LIQ_OK; } LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img) { img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads()); return img->temp_f_row != NULL; } LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint) { return img->width * img->height > (low_memory_hint ? 
LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow } static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma) { if (gamma < 0 || gamma > 1.0) { liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)"); return NULL; } if (!rows && !row_callback) { liq_log_error(attr, "missing row data"); return NULL; } liq_image *img = attr->malloc(sizeof(liq_image)); if (!img) return NULL; *img = (liq_image){ .magic_header = liq_image_magic, .malloc = attr->malloc, .free = attr->free, .width = width, .height = height, .gamma = gamma ? gamma : 0.45455, .rows = rows, .row_callback = row_callback, .row_callback_user_info = row_callback_user_info, .min_opaque_val = attr->min_opaque_val, }; if (!rows || attr->min_opaque_val < 1.f) { img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads()); if (!img->temp_row) return NULL; } // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) { verbose_print(attr, " conserving memory"); if (!liq_image_use_low_memory(img)) return NULL; } if (img->min_opaque_val < 1.f) { verbose_print(attr, " Working around IE6 bug by making image less transparent..."); } return img; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) { return LIQ_VALUE_OUT_OF_RANGE; } if (ownership_flags & LIQ_OWN_ROWS) { if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE; img->free_rows = true; } if (ownership_flags & LIQ_OWN_PIXELS) { img->free_pixels = true; 
if (!img->pixels) { // for simplicity of this API there's no explicit bitmap argument, // so the row with the lowest address is assumed to be at the start of the bitmap img->pixels = img->rows[0]; for(unsigned int i=1; i < img->height; i++) { img->pixels = MIN(img->pixels, img->rows[i]); } } } return LIQ_OK; } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image); LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image); LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER; const size_t required_size = img->width * img->height; if (buffer_size < required_size) { return LIQ_BUFFER_TOO_SMALL; } if (ownership == LIQ_COPY_PIXELS) { unsigned char *tmp = img->malloc(required_size); if (!tmp) { return LIQ_OUT_OF_MEMORY; } memcpy(tmp, importance_map, required_size); importance_map = tmp; } else if (ownership != LIQ_OWN_PIXELS) { return LIQ_UNSUPPORTED; } liq_image_free_importance_map(img); img->importance_map = importance_map; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER; if (background->background) { return LIQ_UNSUPPORTED; } if (img->width != background->width || img->height != background->height) { return LIQ_BUFFER_TOO_SMALL; } if (img->background) { liq_image_destroy(img->background); } img->background = background; liq_image_free_maps(img); // Force them to be re-analyzed with the background return LIQ_OK; } LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return false; } if (width <= 0 || 
height <= 0) { liq_log_error(attr, "width and height must be > 0"); return false; } if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) { liq_log_error(attr, "image too large"); return false; } return true; } LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma); } LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } for(int i=0; i < height; i++) { if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) { liq_log_error(attr, "invalid row pointers"); return NULL; } } return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma); } LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } if (!CHECK_USER_POINTER(bitmap)) { liq_log_error(attr, "invalid bitmap pointer"); return NULL; } rgba_pixel *const pixels = (rgba_pixel *const)bitmap; rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height); if (!rows) return NULL; for(int i=0; i < height; i++) { rows[i] = pixels + width * i; } liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma); if (!image) { attr->free(rows); return NULL; } image->free_rows = true; image->free_rows_internal = true; return image; } NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info); LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback 
/* (continued from previous chunk: parameter list of liq_executing_user_callback) */
*callback, liq_color *temp_row, int row, int width, void *user_info)
{
    /* Trampoline for the user's row-supplying callback; NEVER_INLINE so a
       crash inside user code shows a recognizable frame in backtraces. */
    assert(callback);
    assert(temp_row);
    callback(temp_row, row, width, user_info);
}

/* True if RGBA data can be obtained at all: either direct row pointers,
   or a callback paired with a temp buffer to receive rows. */
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) {
        return false;
    }
    return img->rows || (img->temp_row && img->row_callback);
}

/* True if callers may read img->rows[] directly. The IE6 alpha workaround
   (min_opaque_val < 1) forces a copy so rows can be modified in-place. */
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
    assert(liq_image_has_rgba_pixels(img));

    const bool iebug = img->min_opaque_val < 1.f;
    return (img->rows && !iebug);
}

/* Returns one row of RGBA pixels, either directly or via a per-thread temp
   buffer (filled from rows or from the user callback). The returned pointer
   is only valid until the same thread requests another row. */
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rgba_rows(img)) {
        return img->rows[row];
    }

    assert(img->temp_row);
    /* each OpenMP thread gets its own slice of the temp buffer */
    rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
    if (img->rows) {
        memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
    } else {
        liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
    }

    if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); /* IE6 workaround */
    return temp_row;
}

/* Converts one RGBA row to linear-light float pixels using the gamma LUT.
   Destination must be 16-byte aligned when SSE is enabled. */
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));

    const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);

    for(unsigned int col=0; col < img->width; col++) {
        row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
    }
}

/* Prepares float-pixel access: tries to cache the whole image as f_pixels;
   if the image is too large, falls back to per-row low-memory conversion.
   Must be called from the master thread only (not thread-safe). */
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
    assert(omp_get_thread_num() == 0);
    if (img->f_pixels) {
        return true; /* already initialized */
    }
    if (!liq_image_should_use_low_memory(img, false)) {
        img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
    }
    if (!img->f_pixels) {
        /* allocation skipped or failed: allocate per-thread row buffers instead */
        return liq_image_use_low_memory(img);
    }

    if (!liq_image_has_rgba_pixels(img)) {
        return false;
    }

    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    /* continues in next chunk: loop converting every row into f_pixels */
    for(unsigned int i=0; i <
img->height; i++) { convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut); } return true; } LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row) { if (!img->f_pixels) { assert(img->temp_f_row); // init should have done that float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); convert_row_to_f(img, row_for_thread, row, gamma_lut); return row_for_thread; } return img->f_pixels + img->width * row; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->width; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->height; } typedef void free_func(void*); LIQ_NONNULL static free_func *get_default_free_func(liq_image *img) { // When default allocator is used then user-supplied pointers must be freed with free() if (img->free_rows_internal || img->free != liq_aligned_free) { return img->free; } return free; } LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image) { if (input_image->free_pixels && input_image->pixels) { get_default_free_func(input_image)(input_image->pixels); input_image->pixels = NULL; } if (input_image->free_rows && input_image->rows) { get_default_free_func(input_image)(input_image->rows); input_image->rows = NULL; } } LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) { if (input_image->importance_map) { input_image->free(input_image->importance_map); input_image->importance_map = NULL; } } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) { liq_image_free_importance_map(input_image); if (input_image->edges) { input_image->free(input_image->edges); input_image->edges = NULL; } if (input_image->dither_map) { 
input_image->free(input_image->dither_map); input_image->dither_map = NULL; } } LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return; liq_image_free_rgba_source(input_image); liq_image_free_maps(input_image); if (input_image->f_pixels) { input_image->free(input_image->f_pixels); } if (input_image->temp_row) { input_image->free(input_image->temp_row); } if (input_image->temp_f_row) { input_image->free(input_image->temp_f_row); } if (input_image->background) { liq_image_destroy(input_image->background); } input_image->magic_header = liq_freed_magic; input_image->free(input_image); } LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return NULL; } liq_histogram *hist = attr->malloc(sizeof(liq_histogram)); if (!hist) return NULL; *hist = (liq_histogram) { .magic_header = liq_histogram_magic, .malloc = attr->malloc, .free = attr->free, .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input), }; return hist; } LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return; hist->magic_header = liq_freed_magic; pam_freeacolorhash(hist->acht); hist->free(hist); } LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img) { liq_result *res; if (LIQ_OK != liq_image_quantize(img, attr, &res)) { return NULL; } return res; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!liq_image_has_rgba_pixels(img)) { return LIQ_UNSUPPORTED; } liq_histogram *hist = liq_histogram_create(attr); if (!hist) { return LIQ_OUT_OF_MEMORY; } liq_error err = liq_histogram_add_image(hist, attr, img); if (LIQ_OK != err) { return err; } err = liq_histogram_quantize_internal(hist, attr, false, result_output); 
liq_histogram_destroy(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) { return liq_histogram_quantize_internal(input_hist, attr, true, result_output); } LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) { if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER; *result_output = NULL; if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (liq_progress(attr, 0)) return LIQ_ABORTED; histogram *hist; liq_error err = finalize_histogram(input_hist, attr, &hist); if (err != LIQ_OK) { return err; } err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output); pam_freeacolorhist(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } if (res->dither_level < 0 || res->dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; res->dither_level = dither_level; return LIQ_OK; } LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return NULL; } liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result)); if (!res) return NULL; *res = (liq_remapping_result) { .magic_header = liq_remapping_result_magic, .malloc = result->malloc, .free = result->free, .dither_level = result->dither_level, .use_dither_map = result->use_dither_map, .palette_error = result->palette_error, .gamma = result->gamma, .palette = pam_duplicate_colormap(result->palette), .progress_callback = 
        /* tail of liq_remapping_result_create() initializer (continued from previous chunk) */
        result->progress_callback,
        .progress_callback_user_info = result->progress_callback_user_info,
        /* when a dither map must be built first, reserve the first 20% of
           progress for that stage */
        .progress_stage1 = result->use_dither_map ? 20 : 0,
    };
    return res;
}

/* Returns the gamma the output palette/pixels are encoded with, or -1 on bad pointer. */
LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    return result->gamma;
}

/* Frees a remapping snapshot: its duplicated palette, its pixel buffer,
   then the struct itself. Poisons the magic header first. */
LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;

    if (result->palette) pam_freecolormap(result->palette);
    if (result->pixels) result->free(result->pixels);

    result->magic_header = liq_freed_magic;
    result->free(result);
}

/* Frees a quantization result and any cached remapping snapshot.
   int_palette structs are zeroed before freeing — presumably so stale
   palette bytes can't leak through dangling pointers. */
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;

    memset(&res->int_palette, 0, sizeof(liq_palette));

    if (res->remapping) {
        memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(res->remapping);
    }

    pam_freecolormap(res->palette);

    res->magic_header = liq_freed_magic;
    res->free(res);
}

/* MSE of the generated palette vs. the histogram, in standard 0..255 scale;
   -1 if unknown or on bad pointer. */
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->palette_error >= 0) {
        return mse_to_standard_mse(result->palette_error);
    }
    return -1;
}

/* MSE measured during the last remap (includes dithering effects); -1 if no
   remap has been done yet. */
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_standard_mse(result->remapping->palette_error);
    }
    return -1;
}

/* Same as liq_get_quantization_error() but converted to the 0-100 quality scale. */
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    return -1;
}

/* Same as liq_get_remapping_error() but on the 0-100 quality scale.
   Continues in next chunk. */
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->remapping && result->remapping->palette_error >= 0) {
        return
mse_to_quality(result->remapping->palette_error); } return -1; } LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2) { const float v1 = ((const colormap_item*)ch1)->popularity; const float v2 = ((const colormap_item*)ch2)->popularity; return v1 > v2 ? -1 : 1; } LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem) { if (!nelem) return; qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity); } #define SWAP_PALETTE(map, a,b) { \ const colormap_item tmp = (map)->palette[(a)]; \ (map)->palette[(a)] = (map)->palette[(b)]; \ (map)->palette[(b)] = tmp; } LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options) { /* ** Step 3.5 [GRR]: remap the palette colors so that all entries with ** the maximal alpha value (i.e., fully opaque) are at the end and can ** therefore be omitted from the tRNS chunk. */ if (options->last_index_transparent) { for(unsigned int i=0; i < map->colors; i++) { if (map->palette[i].acolor.a < 1.f/256.f) { const unsigned int old = i, transparent_dest = map->colors-1; SWAP_PALETTE(map, transparent_dest, old); /* colors sorted by popularity make pngs slightly more compressible */ sort_palette_qsort(map, 0, map->colors-1); return; } } } unsigned int non_fixed_colors = 0; for(unsigned int i = 0; i < map->colors; i++) { if (map->palette[i].fixed) { break; } non_fixed_colors++; } /* move transparent colors to the beginning to shrink trns chunk */ unsigned int num_transparent = 0; for(unsigned int i = 0; i < non_fixed_colors; i++) { if (map->palette[i].acolor.a < 255.f/256.f) { // current transparent color is swapped with earlier opaque one if (i != num_transparent) { SWAP_PALETTE(map, num_transparent, i); i--; } num_transparent++; } } liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? 
"y" : "ies"); /* colors sorted by popularity make pngs slightly more compressible * opaque and transparent are sorted separately */ sort_palette_qsort(map, 0, num_transparent); sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent); if (non_fixed_colors > 9 && map->colors > 16) { SWAP_PALETTE(map, 7, 1); // slightly improves compression SWAP_PALETTE(map, 8, 2); SWAP_PALETTE(map, 9, 3); } } inline static unsigned int posterize_channel(unsigned int color, unsigned int bits) { return (color & ~((1<<bits)-1)) | (color >> (8-bits)); } LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize) { float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma); dest->count = map->colors; for(unsigned int x = 0; x < map->colors; ++x) { rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor); px.r = posterize_channel(px.r, posterize); px.g = posterize_channel(px.g, posterize); px.b = posterize_channel(px.b, posterize); px.a = posterize_channel(px.a, posterize); map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */ if (!px.a && !map->palette[x].fixed) { px.r = 71; px.g = 112; px.b = 76; } dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a}; } } LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL; if (result->remapping && result->remapping->int_palette.count) { return &result->remapping->int_palette; } if (!result->int_palette.count) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output); } return &result->int_palette; } LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map) { const int rows = input_image->height; const unsigned int cols = input_image->width; double 
remapping_error=0; if (!liq_image_get_row_f_init(input_image)) { return -1; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return -1; } const colormap_item *acolormap = map->palette; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; const unsigned int max_threads = omp_get_max_threads(); kmeans_state average_color[(KMEANS_CACHE_LINE_GAP+map->colors) * max_threads]; kmeans_init(map, max_threads, average_color); #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error) for(int row = 0; row < rows; ++row) { const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL; unsigned int last_match=0; for(unsigned int col = 0; col < cols; ++col) { float diff; last_match = nearest_search(n, &row_pixels[col], last_match, &diff); if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) { last_match = transparent_index; } output_pixels[row][col] = last_match; remapping_error += diff; kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color); } } kmeans_finalize(map, max_threads, average_color); nearest_free(n); return remapping_error / (input_image->width * input_image->height); } inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px) { /* Use Floyd-Steinberg errors to adjust actual color. 
*/ const float sr = thiserr.r * dither_level, sg = thiserr.g * dither_level, sb = thiserr.b * dither_level, sa = thiserr.a * dither_level; float ratio = 1.0; const float max_overflow = 1.1f; const float max_underflow = -0.1f; // allowing some overflow prevents undithered bands caused by clamping of all channels if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr); else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); } if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg); else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); } if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb); else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); } float a = px.a + sa; if (a > 1.f) { a = 1.f; } else if (a < 0) { a = 0; } // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! ;) const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa; if (dither_error > max_dither_error) { ratio *= 0.8f; } else if (dither_error < 2.f/256.f/256.f) { // don't dither areas that don't have noticeable error — makes file smaller return px; } return (f_pixel){ .r=px.r + sr * ratio, .g=px.g + sg * ratio, .b=px.b + sb * ratio, .a=a, }; } /** Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered. If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image. */ LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped) { const int rows = input_image->height, cols = input_image->width; const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? 
input_image->dither_map : input_image->edges) : NULL; const colormap *map = quant->palette; const colormap_item *acolormap = map->palette; if (!liq_image_get_row_f_init(input_image)) { return false; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return false; } /* Initialize Floyd-Steinberg error vectors. */ const size_t errwidth = cols+2; f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access if (!thiserr) return false; f_pixel *restrict nexterr = thiserr + errwidth; memset(thiserr, 0, errwidth * sizeof(thiserr[0])); bool ok = true; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; // response to this value is non-linear and without it any value < 0.8 would give almost no dithering float base_dithering_level = quant->dither_level; base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level); if (dither_map) { base_dithering_level *= 1.f/255.f; // convert byte to float } base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating int fs_direction = 1; unsigned int last_match=0; for (int row = 0; row < rows; ++row) { if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) { ok = false; break; } memset(nexterr, 0, errwidth * sizeof(nexterr[0])); int col = (fs_direction > 0) ? 0 : (cols - 1); const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? 
liq_image_get_row_f(input_image->background, row) : NULL; do { float dither_level = base_dithering_level; if (dither_map) { dither_level *= dither_map[row*cols + col]; } const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]); const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match; float diff; last_match = nearest_search(n, &spx, guessed_match, &diff); f_pixel output_px = acolormap[last_match].acolor; if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) { output_px = bg_pixels[col]; output_pixels[row][col] = transparent_index; } else { output_pixels[row][col] = last_match; } f_pixel err = { .r = (spx.r - output_px.r), .g = (spx.g - output_px.g), .b = (spx.b - output_px.b), .a = (spx.a - output_px.a), }; // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! ;) if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) { err.r *= 0.75f; err.g *= 0.75f; err.b *= 0.75f; err.a *= 0.75f; } /* Propagate Floyd-Steinberg error terms. 
*/ if (fs_direction > 0) { thiserr[col + 2].a += err.a * (7.f/16.f); thiserr[col + 2].r += err.r * (7.f/16.f); thiserr[col + 2].g += err.g * (7.f/16.f); thiserr[col + 2].b += err.b * (7.f/16.f); nexterr[col + 2].a = err.a * (1.f/16.f); nexterr[col + 2].r = err.r * (1.f/16.f); nexterr[col + 2].g = err.g * (1.f/16.f); nexterr[col + 2].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col ].a += err.a * (3.f/16.f); nexterr[col ].r += err.r * (3.f/16.f); nexterr[col ].g += err.g * (3.f/16.f); nexterr[col ].b += err.b * (3.f/16.f); } else { thiserr[col ].a += err.a * (7.f/16.f); thiserr[col ].r += err.r * (7.f/16.f); thiserr[col ].g += err.g * (7.f/16.f); thiserr[col ].b += err.b * (7.f/16.f); nexterr[col ].a = err.a * (1.f/16.f); nexterr[col ].r = err.r * (1.f/16.f); nexterr[col ].g = err.g * (1.f/16.f); nexterr[col ].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col + 2].a += err.a * (3.f/16.f); nexterr[col + 2].r += err.r * (3.f/16.f); nexterr[col + 2].g += err.g * (3.f/16.f); nexterr[col + 2].b += err.b * (3.f/16.f); } // remapping is done in zig-zag col += fs_direction; if (fs_direction > 0) { if (col >= cols) break; } else { if (col < 0) break; } } while(1); f_pixel *const temperr = thiserr; thiserr = nexterr; nexterr = temperr; fs_direction = -fs_direction; } input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped nearest_free(n); return ok; } /* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */ LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse) { const float 
max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f); if (fixed_colors_count) { for(int j=0; j < hist->size; j++) { for(unsigned int i=0; i < fixed_colors_count; i++) { if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) { hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry j--; break; // continue searching histogram } } } } } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER; if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE; if (input_hist->ignorebits > 0 && input_hist->had_image_added) { return LIQ_UNSUPPORTED; } input_hist->ignorebits = 0; input_hist->had_image_added = true; input_hist->gamma = gamma ? gamma : 0.45455; if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free); if (!input_hist->acht) { return LIQ_OUT_OF_MEMORY; } } // Fake image size. It's only for hash size estimates. 
if (!input_hist->acht->cols) { input_hist->acht->cols = num_entries; } input_hist->acht->rows += num_entries; const unsigned int hash_size = input_hist->acht->hash_size; for(int i=0; i < num_entries; i++) { const rgba_pixel rgba = { .r = entries[i].color.r, .g = entries[i].color.g, .b = entries[i].color.b, .a = entries[i].color.a, }; union rgba_as_int px = {rgba}; unsigned int hash; if (px.rgba.a) { hash = px.l % hash_size; } else { hash=0; px.l=0; } if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) { return LIQ_OUT_OF_MEMORY; } } return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER; const unsigned int cols = input_image->width, rows = input_image->height; if (!input_image->importance_map && options->use_contrast_maps) { contrast_maps(input_image); } input_hist->gamma = input_image->gamma; for(int i = 0; i < input_image->fixed_colors_count; i++) { liq_error res = liq_histogram_add_fixed_color(input_hist, input_image->fixed_colors[i]); if (res != LIQ_OK) { return res; } } /* ** Step 2: attempt to make a histogram of the colors, unclustered. ** If at first we don't succeed, increase ignorebits to increase color ** coherence and try again. */ if (liq_progress(options, options->progress_stage1 * 0.4f)) return LIQ_ABORTED; const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image); // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not // the first image added const unsigned int max_histogram_entries = input_hist->had_image_added ? 
~0 : options->max_histogram_entries; // ~0 = effectively unlimited once a first image was added
do {
    if (!input_hist->acht) {
        input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
    }
    if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;

    // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
    // noise map does not include edges to avoid ruining anti-aliasing
    for(unsigned int row=0; row < rows; row++) {
        bool added_ok;
        if (all_rows_at_once) {
            added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
            if (added_ok) break;
        } else {
            const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
            added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
        }
        if (!added_ok) {
            // ran out of hash capacity: coarsen colors and rebuild from scratch
            input_hist->ignorebits++;
            liq_verbose_printf(options, "  too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
            pam_freeacolorhash(input_hist->acht);
            input_hist->acht = NULL;
            if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
            break;
        }
    }
} while(!input_hist->acht);

input_hist->had_image_added = true;

liq_image_free_importance_map(input_image);

if (input_image->free_pixels && input_image->f_pixels) {
    liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
}

return LIQ_OK;
}

/** Converts the accumulated color hash into a flat histogram in *hist_output.
    Consumes (frees) the hash; caller owns the returned histogram. */
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL;

    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }

    liq_verbose_printf(options, "  made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}

LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) *
(1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val); // continuation of modify_alpha()
            al *= 256.f;
            row_pixels[col].a = al >= 255.f ? 255 : al;
        }
    }
}

/**
 Builds two maps:
    importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
    edges - noise map including all edges
 */
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
    const unsigned int cols = image->width, rows = image->height;
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }

    unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
    image->importance_map = NULL;
    unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
    image->edges = NULL;
    unsigned char *restrict tmp = image->malloc(cols*rows);

    // free(NULL) is a no-op, so partial allocation failures are handled uniformly
    if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
        image->free(noise);
        image->free(edges);
        image->free(tmp);
        return;
    }

    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);

    for (unsigned int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); // clamp at bottom edge

        f_pixel prev, curr = curr_row[0], next=curr;
        for (unsigned int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)]; // clamp at right edge

            // contrast is difference between pixels neighbouring horizontally and vertically
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);

            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];

            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);

            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f -
MAX(z,MIN(horiz,vert)); // continuation of contrast_maps(): completes z = 1.f - MAX(...)
            z *= z; // noise is amplified
            z *= z;

            // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
            const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
            noise[j*cols+i] = MIN(z_int, 255);
            const int e_int = 255 - (int)(edge * 256.f);
            edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);
    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    image->importance_map = noise;
    image->edges = edges;
}

/**
 * Builds map of neighbor pixels mapped to the same palette entry
 *
 * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
 * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
 * Correct flood fill doesn't have visually good properties.
 */
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
                // Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
                continue; // continuation of update_dither_map()
            }

            if (px != lastpixel || col == width-1) {
                // weight the run [lastcol, col) by how many vertical neighbors share the same palette entry
                int neighbor_count = 10 * (col-lastcol);

                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }

                while(lastcol <= col) {
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // edges buffer is repurposed as the dither map; ownership moves to dither_map
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}

/**
 * Palette can be NULL, in which case it creates a new palette from scratch.
 * Takes ownership of (frees) the given palette and returns a new one with
 * the fixed colors appended, marked as .fixed.
 */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);

    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}

// histogram weight adjustment applied between feedback-loop trials in find_best_palette()
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}

/**
 Repeats mediancut with different histogram weights to find palette with minimum error.

 feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
    int feedback_loop_trials = options->feedback_loop_trials;
    colormap *acolormap = NULL;
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
    const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
                               options->malloc, options->free);
        } else {
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }

        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time K-Means iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ?
NULL : adjust_histogram_callback); // continuation of find_best_palette()

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;

            if (total_error < target_mse && total_error > 0) {
                // K-Means iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
        if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
        liq_verbose_printf(options, "  selecting colors...%d%%", (int)(100.f * fraction_done));
    } while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}

/** Copies every histogram entry verbatim into a new colormap
    (used when the image has few enough colors to skip quantization). */
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options)
{
    if (!hist->size) {
        return NULL;
    }

    colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
    for(unsigned int i=0; i < hist->size; i++) {
        acolormap->palette[i].acolor = hist->achv[i].acolor;
        acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
    }
    return acolormap;
}

LIQ_NONNULL static
// Core quantization driver: builds the best palette for the histogram,
// refines it with K-Means, enforces the quality (MSE) limit, sorts the
// palette and packages everything into a newly allocated liq_result.
liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
    colormap *acolormap;
    double palette_error = -1;

    assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));

    const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;

    if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
        if (!acolormap) {
            return LIQ_VALUE_OUT_OF_RANGE;
        }

        // K-Means iteration approaches local minimum for the palette
        const double iteration_limit = options->kmeans_iteration_limit;
        unsigned int iterations = options->kmeans_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            verbose_print(options, "  moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = kmeans_do_iteration(hist, acolormap, NULL);

                if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
                    break;
                }

                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // skip some iterations to fail faster
                }
                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, "  image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)", mse_to_standard_mse(palette_error), mse_to_quality(palette_error), mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return LIQ_QUALITY_TOO_LOW;
        }
    }

    if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
        pam_freecolormap(acolormap);
        return LIQ_ABORTED;
    }

    sort_palette(acolormap, options);

    // If palette was created from a multi-image histogram,
    // then it shouldn't be optimized for one image during remapping
    if (fixed_result_colors) {
        for(unsigned int i=0; i < acolormap->colors; i++) {
            acolormap->palette[i].fixed = true;
        }
    }

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return LIQ_OUT_OF_MEMORY;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };

    *result_output = result;
    return LIQ_OK;
}

/** Remaps input_image to result's palette, writing 8-bit indexes into the
    caller's contiguous buffer (public API). */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if
(!CHECK_STRUCT_TYPE(input_image, liq_image)) { // continuation of liq_write_remapped_image()
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    // build per-row pointers into the caller's contiguous buffer
    unsigned char *rows[input_image->height];
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

/** Remaps input_image to quant's palette, writing one 8-bit index row per
    row_pointers[i] (public API). Performs dithering per dither_level. */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // only one remapping result is kept per liq_result; drop the previous one
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }

    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
*/
    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
    } else {
        const bool generate_dither_map = result->use_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
            update_dither_map(input_image, row_pointers, result->palette);
        }

        if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
            return LIQ_ABORTED;
        }

        // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
            return LIQ_ABORTED;
        }
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}

/** Returns the library version number (LIQ_VERSION). */
LIQ_EXPORT int liq_version() {
    return LIQ_VERSION;
}
ocp_nlp_sqp_rti.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"

// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"

// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"



/************************************************
 * options
 ************************************************/

// Returns the number of bytes needed for the SQP-RTI options
// (the struct itself plus the nested generic NLP options).
int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_rti_opts);

    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}

// Places the options struct into caller-provided raw_memory and
// assigns the nested NLP options directly after it.
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);
assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}

// Sets default values for the SQP-RTI options and the nested NLP options.
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    // int ii;
    // int N = dims->N;

    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP RTI opts
    opts->ext_qp_res = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;

    // overwrite default submodules opts
    // do not compute adjoint in dynamics and constraints
    // int compute_adj = 0;

    // // dynamics
    // for (ii = 0; ii < N; ii++)
    // {
    //     dynamics[ii]->opts_set(dynamics[ii],
    //         opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj);
    // }

    // // constraints
    // for (ii = 0; ii <= N; ii++)
    // {
    //     constraints[ii]->opts_set(constraints[ii],
    //         opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj);
    // }

    return;
}

// Updates derived options; delegates to the generic NLP options update.
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}

// Generic option setter. Field names are inspected for a module prefix
// (text before the first '_', e.g. "qp_..."); prefixed fields are forwarded
// to the corresponding submodule, the rest are handled here.
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
ptr_module = module; // continuation of ocp_nlp_sqp_rti_opts_set()
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        // keep a local copy so the first QP's warm start can be controlled separately
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            if (*rti_phase < 0 || *rti_phase > 2)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0, 1, 2\n");
                exit(1);
            }
            else
                opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // everything else is handled by the generic NLP option setter
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}

// Per-stage option setter: forwards to the generic NLP per-stage setter.
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);

    return;
}



/************************************************
 * memory
 ************************************************/

// Returns the number of bytes needed for the SQP-RTI memory
// (struct + nested NLP memory + statistics table + alignment).
int ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_rti_memory);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    int stat_m = 1+1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4; // 4 extra columns for external QP residuals
    size += stat_n*stat_m*sizeof(double);

    size += 8;  // initial align

    make_int_multiple_of(8, &size);

    return size;
}

// Lays the SQP-RTI memory out in caller-provided raw_memory.
void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // int ii;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_memory);

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    mem->stat = (double *) c_ptr;
    mem->stat_m = 1+1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Returns the number of bytes needed for the SQP-RTI workspace
// (struct + NLP workspace + temporary QP in/out + optional QP residuals).
int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int size = 0;

    // sqp
    size += sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}

// Casts raw workspace memory into the typed SQP-RTI workspace sub-structures.
// Layout must mirror ocp_nlp_sqp_rti_workspace_calculate_size() exactly.
static void ocp_nlp_sqp_rti_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
    ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Solver entry point: dispatches to the preparation and/or feedback phase
// depending on opts->rti_phase (0 = both, 1 = preparation only, 2 = feedback only).
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
// zero timers acados_timer timer0; double total_time = 0.0; mem->time_tot = 0.0; ocp_nlp_sqp_rti_opts *nlp_opts = opts_; int rti_phase = nlp_opts->rti_phase; acados_tic(&timer0); switch(rti_phase) { // perform preparation and feedback rti_phase case 0: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform preparation rti_phase case 1: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform feedback rti_phase case 2: ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; } total_time += acados_toc(&timer0); mem->time_tot = total_time; nlp_out->total_time = total_time; return mem->status; } void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; mem->time_lin = 0.0; mem->time_reg = 0.0; int N = dims->N; int ii; #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->nlp_opts->num_threads); #pragma omp parallel { // beginning of parallel region #endif // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii < N; ii++) { config->dynamics[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->dynamics[ii]); 
config->dynamics[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_ux1_ptr( nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux1_ptr( nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_pi_ptr( nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_pi_ptr( nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_BAbt_ptr( nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_dzduxt_ptr( nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_sim_guess_ptr( nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]); } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->cost[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_Z_ptr( nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->constraints[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_lam_ptr( nlp_out->lam+ii, nlp_mem->constraints[ii]); 
config->constraints[ii]->memory_set_tmp_lam_ptr( nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_DCt_ptr( nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxb_ptr( nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxs_rev_ptr( nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxe_ptr( nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr( dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr( dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr( dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr( dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr( dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr( dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr( dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr( dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr( dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure 
precompute is called everywhere (e.g. Python interface). for (ii = 0; ii < N; ii++) { config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif // initialize QP ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); /* SQP body */ int sqp_iter = 0; nlp_mem->sqp_iter = &sqp_iter; // linearizate NLP and update QP matrices acados_tic(&timer1); ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_lin += acados_toc(&timer1); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif return; } void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int qp_iter = 0; int qp_status = 0; double tmp_time; mem->time_qp_sol = 0.0; mem->time_qp_solver_call = 0.0; mem->time_qp_xcond = 0.0; mem->time_glob = 0.0; // embed initial value (this actually updates all bounds at stage 0...) 
ocp_nlp_embed_initial_value(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // update QP rhs for SQP (step prim var, abs dual var) ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // regularize Hessian acados_tic(&timer1); config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); if (opts->print_level > 0) { printf("\n------- qp_in --------\n"); print_ocp_qp_in(nlp_mem->qp_in); } if (!opts->warm_start_first_qp) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int); } // solve qp acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); mem->time_qp_sol += acados_toc(&timer1); qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); mem->time_qp_solver_call += tmp_time; qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // TODO move into QP solver memory ??? 
qp_info *qp_info_; ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_); nlp_out->qp_iter = qp_info_->num_iter; qp_iter = qp_info_->num_iter; // compute external QP residuals (for debugging) if (opts->ext_qp_res) { ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws); ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2)); // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, // inf_norm_qp_res[0], inf_norm_qp_res[1], // inf_norm_qp_res[2], inf_norm_qp_res[3]); } // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter); // print_ocp_qp_out(nlp_mem->qp_out); // exit(1); // save statistics mem->stat[mem->stat_n*1+0] = qp_status; mem->stat[mem->stat_n*1+1] = qp_iter; if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { // print_ocp_qp_in(mem->qp_in); #ifndef ACADOS_SILENT printf("QP solver returned error status %d\n", qp_status); #endif mem->status = ACADOS_QP_FAILURE; return; } // globalization acados_tic(&timer1); double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_glob += acados_toc(&timer1); // update variables ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha); // ocp_nlp_dims_print(nlp_out->dims); // ocp_nlp_out_print(nlp_out); // exit(1); // print_ocp_qp_in(mem->qp_in); mem->status = ACADOS_SUCCESS; } int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; // ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(giaf) flag to 
enable/disable checks for (ii = 0; ii <= N; ii++) { int module_val; config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val); if (dims->ns[ii] != module_val) { printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \ for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val); exit(1); } } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_out *sens_nlp_out = sens_nlp_out_; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in); d_ocp_qp_set_rhs_zero(work->tmp_qp_in); double one = 1.0; if ((!strcmp("ex", field)) & (stage==0)) { d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in); d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in); // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in); config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out); // exit(1); /* copy tmp_qp_out into sens_nlp_out */ int i; int N = dims->N; int *nv = 
dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; for (i = 0; i <= N; i++) { blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0); if (i < N) blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0); } } else { printf("\nerror: field %s at stage %d not available in \ ocp_nlp_sqp_rti_eval_param_sens\n", field, stage); exit(1); } return; } // TODO rename memory_get ??? void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_rti_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = 1; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_glob", field)) { double *value = return_value_; *value = mem->time_glob; } else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field)) { double tmp = 0.0; double *ptr = return_value_; int N = dims->N; int ii; for (ii=0; ii<N; 
ii++) { config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp); *ptr += tmp; } } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = 2; double *value = return_value_; for (int ii=0; ii<n_row; ii++) { value[ii+0] = ii; for (int jj=0; jj<mem->stat_n; jj++) value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n]; } } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_mem->nlp_res; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_); } else if (!strcmp("res_stat", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_stat; } else if (!strcmp("res_eq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_eq; } else if (!strcmp("res_ineq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq; } else if (!strcmp("res_comp", field)) { double *value = return_value_; *value = 
mem->nlp_mem->nlp_res->inf_norm_res_comp; } else if (!strcmp("cost_value", field)) { double *value = return_value_; *value = mem->nlp_mem->cost_value; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_, void *opts_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; if (!strcmp("nlp_opts", field)) { void **value = return_value_; *value = opts->nlp_opts; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_, void *work_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_workspace *work = work_; if (!strcmp("nlp_work", field)) { void **value = return_value_; *value = work->nlp_work; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_rti_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_rti_opts_update; config->opts_set = &ocp_nlp_sqp_rti_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_rti_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_rti_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp_rti; config->eval_param_sens = &ocp_nlp_sqp_rti_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default; config->precompute = &ocp_nlp_sqp_rti_precompute; config->get = &ocp_nlp_sqp_rti_get; config->opts_get = &ocp_nlp_sqp_rti_opts_get; 
config->work_get = &ocp_nlp_sqp_rti_work_get; return; }
mish_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: 942002795@qq.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" int ref_mish_uint8(struct ir_tensor *input_tensor, struct ir_tensor *output_tensor, int num_thread) { int w = input_tensor->dims[3]; int h = output_tensor->dims[2]; int channels = input_tensor->dims[1]; int batch = input_tensor->dims[0]; int size = h * w; int c_step = h * w; int batch_step = c_step * channels; int total_size = batch_step * batch; // dequant uint8_t* input_uint8 = input_tensor->data; uint8_t* output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; float* data_fp32 = sys_malloc(total_size * sizeof(float)); for(int i = 0; i < total_size; i++) data_fp32[i] = ((float) input_uint8[i] - (float)input_zero) * input_scale; for (int n = 0; n < batch; n++) { //#pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = data_fp32 + batch_step * n 
+ c_step * q; float* dst = data_fp32 + batch_step * n + c_step * q; for (int i = 0; i < size; i++) { dst[i] = src[i] * tanhf(log(1 + exp(src[i]))); } } } // quant for(int i=0; i<total_size; i++) { int udata = round(data_fp32[i] / output_scale + output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(data_fp32); return 0; } int ref_mish_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread) { int w = input_tensor->dims[3]; int h = output_tensor->dims[2]; int channels = input_tensor->dims[1]; int size = h * w; int c_step = h * w; float* input_data = input_tensor->data; float* out_data = output_tensor->data; #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = input_data + c_step * q; float* dst = out_data + c_step * q; for (int i = 0; i < size; i++) { dst[i] = src[i] * tanhf(log(1 + exp(src[i]))); } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int ret = -1; if(input_tensor->data_type == TENGINE_DT_FP32) ret = ref_mish_fp32(input_tensor, output_tensor, exec_graph->num_thread); else if(input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_mish_uint8(input_tensor, output_tensor, exec_graph->num_thread); return ret; } static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { 
struct ir_node* node = exec_node->ir_node; struct ir_graph* ir_graph = node->graph; struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]); struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]); int ret = set_ir_tensor_shape(output, input->dims, input->dim_num); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = {.prerun = NULL, .run = run, .reshape = reshape, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_mish_hcl_ops(void* arg) { return register_builtin_node_ops(OP_MISH, &hcl_node_ops); } static int unreg_mish_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_MISH, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_mish_hcl_ops); AUTO_UNREGISTER_OPS(unreg_mish_hcl_ops);
activation_functions.h
#ifndef ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H #define ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H #include <math.h> #include <string> #include "saber/saber_types.h" #include "utils/logger/logger.h" #include "saber/funcs/impl/x86/saber_avx2_math.h" namespace anakin { namespace saber { namespace math { template <typename T> void sigmoid(size_t len, T *x, T *y) { for (size_t i = 0; i < len; i++) { y[i] = 1. / (1. + exp(-x[i])); } } template <typename T> void parallel_sigmoid(size_t len, T *x, T *y) { #pragma omp parallel for for (size_t i = 0; i < len; i++) { y[i] = 1. / (1. + exp(-x[i])); } } template <typename T> void relu(size_t len, T *x, T *y) { for (size_t i = 0; i < len; i++) { y[i] = x[i] < 0 ? 0 : x[i]; } } template <typename T> void parallel_relu(size_t len, T *x, T *y) { #pragma omp parallel for for (size_t i = 0; i < len; i++) { y[i] = x[i] < 0 ? 0 : x[i]; } } template <typename T> void tanh(size_t len, T *x, T *y) { for (size_t i = 0; i < len; i++) { T e_x = exp(2 * x[i]); y[i] = (e_x - 1) / (e_x + 1); } } template <typename T> void parallel_tanh(size_t len, T *x, T *y) { #pragma omp parallel for for (size_t i = 0; i < len; i++) { T e_x = exp(2 * x[i]); y[i] = (e_x - 1) / (e_x + 1); } } template <typename T> void stanh(size_t len, T *x, T *y) { for (size_t i = 0; i < len; i++) { T e_x = exp(4. * x[i] / 3.); y[i] = 1.7159 * (e_x - 1) / (e_x + 1); } } template <typename T> void parallel_stanh(size_t len, T *x, T *y) { #pragma omp parallel for for (size_t i = 0; i < len; i++) { T e_x = exp(4. 
* x[i] / 3.); y[i] = 1.7159 * (e_x - 1) / (e_x + 1); } } template <typename T> void identity(size_t len, T *x, T *y) { for (size_t i = 0; i < len; i++) { y[i] = x[i]; } } template <typename T> void parallel_identity(size_t len, T *x, T *y) { #pragma omp parallel for for (size_t i = 0; i < len; i++) { y[i] = x[i]; } } template <typename T> struct Active { typedef void (*Act)(size_t, T*, T*); typedef T (*Act_m256)(T); }; static Active<float>::Act k_act_float[] = { nullptr, &sigmoid<float>, &relu<float>, &tanh<float>, nullptr, nullptr, &identity<float>, &sigmoid<float>, &tanh<float>, &stanh<float> }; static Active<float>::Act k_parallel_act_float[] = { nullptr, &parallel_sigmoid<float>, &parallel_relu<float>, &parallel_tanh<float>, nullptr, nullptr, &parallel_identity<float>, &parallel_sigmoid<float>, &parallel_tanh<float>, &parallel_stanh<float> }; inline void activation(size_t len, float *src, float *dst, int index) { auto *func = k_act_float[index]; if (!func) { LOG(ERROR) << "activation not implemented!"; } func(len, src, dst); } inline void parallel_activation(size_t len, float *src, float *dst, int index) { auto *func = k_parallel_act_float[index]; if (!func) { LOG(ERROR) << "activation not implemented!"; } func(len, src, dst); } #ifdef __AVX__ inline __m256 Exp(__m256 a) { return exp256_ps(a); } inline __m256 Relu(const __m256 a) { __m256 tmp = _mm256_set1_ps(0.0f); return _mm256_max_ps(a, tmp); } inline __m256 Sigmoid(const __m256 a) { __m256 tmp = _mm256_sub_ps(_mm256_set1_ps(0.0f), a); tmp = Exp(tmp); tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp); tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp); return tmp; } inline __m256 Tanh(const __m256 a) { __m256 tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), a); tmp = Exp(tmp); return _mm256_sub_ps(_mm256_div_ps(_mm256_set1_ps(2.0f), _mm256_add_ps(_mm256_set1_ps(1.0f), tmp)), _mm256_set1_ps(1.0f)); } inline __m256 Identity(const __m256 a) { return a; } static Active<__m256>::Act_m256 k_act_avx[] = { nullptr, &Sigmoid, 
&Relu, &Tanh, nullptr, nullptr, &Identity, &Sigmoid, &Tanh, nullptr }; inline __m256 avx_activation(__m256 a, int index) { return k_act_avx[index](a); } #endif } // namespace math } // namespace saber } // namespace anakin #endif //ANAKIN_SABER_FUNCS_IMPL_X86_MATH_ACTIVATION_FUNCTIONS_H
020_pi.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

double compute_partial_pi(long nr_tries, unsigned int *seed);

/* Monte-Carlo estimate of pi: nr_blocks independent work items each sample
 * points in the unit square and return the fraction that falls inside the
 * quarter circle; pi ~= 4 * (average fraction).
 *
 * Fixes relative to the previous version:
 *  - "#ifdef __OPENMP" used the wrong macro name (the standard predefined
 *    macro is _OPENMP), so the thread setup was never compiled in and every
 *    thread ran with seed 0;
 *  - the per-block sample count and the final normalization were scaled by
 *    num_threads although only nr_blocks fractions are summed, which would
 *    have underestimated pi by a factor of num_threads. */
int main() {
    const long nr_tries = 1000000;
    const int nr_blocks = 10;
    int num_threads = 1;
    double nr_success = 0.0;
    /* each of the nr_blocks work items gets an equal share of the samples;
     * const-qualified, hence predetermined shared under default(none) */
    const long partial_nr_tries = nr_tries/nr_blocks;
#pragma omp parallel default(none) shared(nr_success) shared(num_threads)
    {
        int thread_num = 0;
        unsigned int seed = 0;
#ifdef _OPENMP
        thread_num = omp_get_thread_num();
        /* every thread stores the same value, so the shared write is benign */
        num_threads = omp_get_num_threads();
        seed = thread_num;   /* distinct stream per thread */
#endif
        printf("thread %d of %d\n", thread_num, num_threads);
#pragma omp for reduction(+:nr_success)
        for (int i = 0; i < nr_blocks; i++)
            nr_success += compute_partial_pi(partial_nr_tries, &seed);
    }
    printf("pi = %.15lf\n", 4.0*nr_success/nr_blocks);
    return 0;
}

/* uniform deviate in [0, 1] drawn from the caller's rand_r() state */
double random_number(unsigned int *seed) {
    return ((double) rand_r(seed))/RAND_MAX;
}

/* Sample nr_tries points in the unit square and return the fraction that
 * lands inside the quarter circle x^2 + y^2 < 1.
 * Note: the "#pragma omp barrier" that used to sit here was removed — a
 * barrier is not allowed in the dynamic extent of a worksharing (omp for)
 * region, and it served no purpose. */
double compute_partial_pi(long nr_tries, unsigned int *seed) {
    double nr_success = 0.0;
    for (long i = 0; i < nr_tries; i++) {
        double x = random_number(seed);
        double y = random_number(seed);
        if (x*x + y*y < 1.0)
            nr_success += 1.0;
    }
    return nr_success/nr_tries;
}
simd-3.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

#define N 100
#define EPS 0.0000000000000001

#include <stdlib.h>

/* Fill a (and its reference copy a_ref) with sign-alternating squares,
   and b with the even numbers 0, 2, 4, ...  */
void init (double *a, double *a_ref, double *b, int n)
{
  int sign = -1;
  for (int k = 0; k < n; k++)
    {
      a[k] = k * k * sign;
      a_ref[k] = a[k];
      b[k] = k + k;
      sign = -sign;
    }
}

/* SIMD-vectorized sum of a[i] + b[i]; tmp is lane-private.  */
double work (double *a, double *b, int n)
{
  double tmp;
  double sum = 0.0;
#pragma omp simd private(tmp) reduction(+:sum)
  for (int j = 0; j < n; j++)
    {
      tmp = a[j] + b[j];
      sum += tmp;
    }
  return sum;
}

/* Scalar reference implementation of work().  */
double work_ref (double *a, double *b, int n)
{
  double acc = 0.0;
  for (int j = 0; j < n; j++)
    acc += a[j] + b[j];
  return acc;
}

/* Compare the SIMD result against the scalar reference; abort on mismatch.  */
int main ()
{
  double a[N], a_ref[N], b[N];

  init (a, a_ref, b, N);

  double res = work (a, b, N);
  double ref = work_ref (a_ref, b, N);
  double diff = res - ref;

  if (diff > EPS || -diff > EPS)
    abort ();
  return 0;
}
adam_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <math.h> // for sqrt in CPU and CUDA #include <Eigen/Dense> #include <string> #include <unordered_map> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/algorithm.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/for_range.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { namespace scatter = paddle::operators::math::scatter; static inline float GetAttrFromTensor(const framework::Tensor* tensor) { const float* tensor_data = tensor->data<float>(); framework::Tensor cpu_tensor; if (platform::is_gpu_place(tensor->place())) { paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor); tensor_data = cpu_tensor.data<float>(); } if (platform::is_xpu_place(tensor->place())) { paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor); tensor_data = cpu_tensor.data<float>(); } return tensor_data[0]; } class AdamOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; framework::OpKernelType 
GetKernelTypeForVar( const std::string& var_name, const framework::Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const override; }; struct GPUAdam; struct CPUAdam; template <typename T, typename Flavour> class AdamFunctor; template <typename T> class AdamFunctor<T, GPUAdam> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; public: AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out) {} inline HOSTDEVICE void operator()(size_t i) const { // Merge all memory access together. 
T g = grad_[i]; T mom1 = moment1_[i]; T mom2 = moment2_[i]; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; T p = param_[i]; // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = p; } }; template <typename T> class AdamFunctor<T, CPUAdam> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; public: AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out) {} void operator()(size_t numel) const { Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{ grad_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{ moment1_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{ moment2_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{ param_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{ param_out_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{ moment1_out_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{ moment2_out_, static_cast<Eigen::Index>(numel)}; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = 
*beta2_pow_; // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); moment1_out = beta1_ * mom1 + (1 - beta1_) * g; moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g; param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_ * sqrt(1 - beta2_pow))); } }; template <typename T, typename Flavour, typename MT = T> class SparseAdamFunctor; template <typename T, typename MT> class SparseAdamFunctor<T, GPUAdam, MT> { private: MT beta1_; MT beta2_; MT epsilon_; const MT* beta1_pow_; const MT* beta2_pow_; const MT* moment1_; MT* moment1_out_; const MT* moment2_; MT* moment2_out_; const MT* lr_; const T* grad_; const T* param_; T* param_out_; const MT* master_param_; MT* master_param_out_; const int64_t* rows_; int64_t row_numel_; int64_t row_count_; bool lazy_mode_; public: SparseAdamFunctor(MT beta1, MT beta2, MT epsilon, const MT* beta1_pow, const MT* beta2_pow, const MT* mom1, MT* mom1_out, const MT* mom2, MT* mom2_out, const MT* lr, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, const int64_t* rows, int64_t row_numel, int64_t row_count, bool lazy_mode) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out), master_param_(master_param), master_param_out_(master_param_out), rows_(rows), row_numel_(row_numel), row_count_(row_count), lazy_mode_(lazy_mode) {} inline HOSTDEVICE void adam_update(size_t i, MT g) const { // The following code is the same as dense MT mom1 = moment1_[i]; MT mom2 = moment2_[i]; MT lr = *lr_; MT beta1_pow = *beta1_pow_; MT beta2_pow = *beta2_pow_; MT p = master_param_ ? 
master_param_[i] : static_cast<MT>(param_[i]); // Calculation lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) / (static_cast<MT>(1.0) - beta1_pow); mom1 = beta1_ * mom1 + (static_cast<MT>(1.0) - beta1_) * g; mom2 = beta2_ * mom2 + (static_cast<MT>(1.0) - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(static_cast<MT>(1.0) - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = static_cast<T>(p); if (master_param_out_) { master_param_out_[i] = p; } } inline HOSTDEVICE void operator()(size_t i) const { auto row_idx = math::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_); if (lazy_mode_ && row_idx < 0) { return; } else { MT g = row_idx >= 0 ? static_cast<MT>(grad_[row_idx * row_numel_ + i % row_numel_]) : static_cast<MT>(0); adam_update(i, g); } } }; template <typename T> class SparseAdamFunctor<T, CPUAdam, T> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; const int64_t* rows_; int64_t row_numel_; int64_t row_count_; public: SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out, const int64_t* rows, int64_t row_numel, int64_t row_count, bool lazy_mode) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out), rows_(rows), row_numel_(row_numel), row_count_(row_count) {} inline HOSTDEVICE void adam_update(size_t i, T g) const { // The following code is the same as dense T mom1 = moment1_[i]; T mom2 = moment2_[i]; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; T p = param_[i]; // 
Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = p; } inline void operator()(size_t numel) const { // lr could be reuse T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); int64_t row_count = static_cast<int64_t>(numel / row_numel_); for (int64_t i = 0, j = 0; i != row_count; ++i) { if (i == *(rows_ + j)) { for (int64_t k = 0; k != row_numel_; ++k) { T g = grad_[j * row_numel_ + k]; adam_update(i * row_numel_ + k, g); } ++j; } else { for (int64_t k = 0; k != row_numel_; ++k) { T mom1 = moment1_[i * row_numel_ + k]; T mom2 = moment2_[i * row_numel_ + k]; T p = param_[i * row_numel_ + k]; mom1 = beta1_ * mom1; mom2 = beta2_ * mom2; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); // Write back to global memory moment1_out_[i * row_numel_ + k] = mom1; moment2_out_[i * row_numel_ + k] = mom2; param_out_[i * row_numel_ + k] = p; } } } } }; template <typename DeviceContext, typename T> class AdamOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true, platform::errors::InvalidArgument( "The Var(%s)'s type should be LoDTensor, " "but the received is %s", ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type()))); using paddle::framework::LoDTensor; int64_t min_row_size_to_use_multithread = ctx.Attr<int64_t>("min_row_size_to_use_multithread"); bool lazy_mode = ctx.Attr<bool>("lazy_mode"); bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow"); VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; auto* param = ctx.Input<LoDTensor>("Param"); auto* grad_var = 
ctx.InputVar("Grad"); auto* mom1 = ctx.Input<LoDTensor>("Moment1"); auto* mom2 = ctx.Input<LoDTensor>("Moment2"); auto* lr = ctx.Input<LoDTensor>("LearningRate"); auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow"); auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow"); auto* param_out = ctx.Output<LoDTensor>("ParamOut"); auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out"); auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out"); auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut"); auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut"); bool skip_update = false; if (ctx.HasInput("SkipUpdate")) { auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate"); PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(SkipUpdate) size must be 1, but get %d", skip_update_tensor->numel())); std::vector<bool> skip_update_vec; paddle::framework::TensorToVector(*skip_update_tensor, ctx.device_context(), &skip_update_vec); skip_update = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update) { VLOG(4) << "Adam skip update"; framework::TensorCopy( *param, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), param_out); framework::TensorCopy( *mom1, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom1_out); framework::TensorCopy( *mom2, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom2_out); framework::TensorCopy( *beta1_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta1_pow_out); framework::TensorCopy( *beta2_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta2_pow_out); return; } T beta1 = static_cast<T>(ctx.Attr<float>("beta1")); if (ctx.HasInput("Beta1Tensor")) { auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor"); PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta1Tensor) 
size must be 1, but get %d", beta1_tensor->numel())); beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor)); } T beta2 = static_cast<T>(ctx.Attr<float>("beta2")); if (ctx.HasInput("Beta2Tensor")) { auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor"); PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta2Tensor) size must be 1, but get %d", beta2_tensor->numel())); beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor)); } T epsilon = static_cast<T>(ctx.Attr<float>("epsilon")); if (ctx.HasInput("EpsilonTensor")) { auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor"); PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(EpsilonTensor) size must be 1, but get %d", epsilon_tensor->numel())); epsilon = static_cast<T>(GetAttrFromTensor(epsilon_tensor)); } VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel() << "beta2_pow.numel() : " << beta2_pow->numel(); VLOG(3) << "param.numel(): " << param->numel(); PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); if (grad_var->IsType<framework::LoDTensor>()) { T beta1_p = beta1_pow->data<T>()[0]; T beta2_p = beta2_pow->data<T>()[0]; if (!use_global_beta_pow) { beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta1 * beta1_pow->data<T>()[0]; beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta2 * beta2_pow->data<T>()[0]; } auto* grad = ctx.Input<LoDTensor>("Grad"); T* param_out_ptr = param_out->mutable_data<T>(ctx.GetPlace()); T* mom1_out_ptr = mom1_out->mutable_data<T>(ctx.GetPlace()); T* mom2_out_ptr = mom2_out->mutable_data<T>(ctx.GetPlace()); T learning_rate = lr->data<T>()[0] * (sqrt(1 - beta2_p) / (1 - beta1_p)); T eps = 
epsilon * sqrt(1 - beta2_p); jit::adam_attr_t attr(beta1, beta2); int64_t numel = param->numel(); const T* param_ptr = param->data<T>(); const T* mom1_ptr = mom1->data<T>(); const T* mom2_ptr = mom2->data<T>(); const T* grad_ptr = grad->data<T>(); auto adam = jit::KernelFuncs<jit::AdamTuple<T>, platform::CPUPlace>::Cache().At( attr); static constexpr int64_t chunk_size = 512; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int64_t i = 0; i < numel / chunk_size; ++i) { const int64_t offset = i * chunk_size; adam(beta1, beta2, -learning_rate, eps, chunk_size, grad_ptr + offset, mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset, mom1_out_ptr + offset, mom2_out_ptr + offset, param_out_ptr + offset); } if (numel % chunk_size != 0) { const int64_t offset = (numel / chunk_size) * chunk_size; const int64_t tail_numel = numel % chunk_size; adam(beta1, beta2, -learning_rate, eps, tail_numel, grad_ptr + offset, mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset, mom1_out_ptr + offset, mom2_out_ptr + offset, param_out_ptr + offset); } } else if (grad_var->IsType<pten::SelectedRows>()) { auto* grad = ctx.Input<pten::SelectedRows>("Grad"); if (grad->rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } pten::SelectedRows tmp_grad_merge; const pten::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = grad; } else { // merge duplicated rows if any. 
// The rows of grad_merge have been sorted inside MergeAdd functor scatter::MergeAdd<DeviceContext, T> merge_func; merge_func(ctx.template device_context<DeviceContext>(), *grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); SparseAdamFunctor<T, CPUAdam> functor( beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(), mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()), mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()), lr->data<T>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel, grad_merge.rows().size(), lazy_mode); // update beta1 and beta2 if (!use_global_beta_pow) { beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta1 * beta1_pow->data<T>()[0]; beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta2 * beta2_pow->data<T>()[0]; } if (lazy_mode) { VLOG(3) << "run cpu lazy mode"; size_t row_count = grad_merge.rows().size(); std::vector<int64_t> cpu_rows(grad_merge.rows()); for (size_t row_index = 0; row_index < row_count; ++row_index) { for (size_t offset = 0; offset < row_numel; ++offset) { size_t i = cpu_rows[row_index] * row_numel + offset; functor.adam_update(i, grad_data[row_index * row_numel + offset]); } } } #ifndef _WIN32 else if (FLAGS_inner_op_parallelism > 1 && // NOLINT min_row_size_to_use_multithread > 0 && param->dims()[0] > min_row_size_to_use_multithread) { VLOG(3) << "use multi thread, inner_op_parallelism=" << FLAGS_inner_op_parallelism << " min_row_size_to_use_multithread=" << min_row_size_to_use_multithread; if (FLAGS_inner_op_parallelism > 10) { VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism << " is two large!"; } auto& grad_rows = grad_merge.rows(); std::unordered_map<size_t, 
int> row_id_to_grad_row_offset; size_t param_row_count = param->numel() / row_numel; if (param_row_count < 1000) { VLOG(1) << "param_row_count should be larger then 1000 to use " "multi thread, currently " << param_row_count; } for (size_t i = 0; i < grad_rows.size(); ++i) { row_id_to_grad_row_offset[grad_rows[i]] = i; } std::vector<std::future<void>> fs; int64_t line_in_each_thread = param_row_count / FLAGS_inner_op_parallelism + 1; for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) { int64_t start = i * line_in_each_thread; int64_t end = (i + 1) * line_in_each_thread; if (start >= static_cast<int64_t>(param_row_count)) { break; } if (end > static_cast<int64_t>(param_row_count)) { end = static_cast<int64_t>(param_row_count); } fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset, &grad_data, row_numel, start, end]() { for (int64_t row_id = start; row_id < end; ++row_id) { auto iter = row_id_to_grad_row_offset.find(row_id); if (iter != row_id_to_grad_row_offset.end()) { for (size_t row_offset = 0U; row_offset < row_numel; ++row_offset) { functor.adam_update( row_id * row_numel + row_offset, grad_data[iter->second * row_numel + row_offset]); } } else { for (size_t row_offset = 0U; row_offset < row_numel; ++row_offset) { functor.adam_update(row_id * row_numel + row_offset, 0); } } } })); } for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } #endif // !_WIN32 else { // NOLINT functor(param->numel()); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Variable type not supported by adam_op")); } } }; } // namespace operators } // namespace paddle
thdat.c
/*
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain this list
 *    of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce this
 *    list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thtk/thtk.h>
#include "program.h"
#include "util.h"
#include "mygetopt.h"

/* Print the command-line usage summary to stdout. */
static void
print_usage(
    void)
{
    printf("Usage: %s [-V] [[-c | -l | -x] VERSION] [ARCHIVE [FILE...]]\n"
        "Options:\n"
        " -c create an archive\n"
        " -l list the contents of an archive\n"
        " -x extract an archive\n"
        " -V display version information and exit\n"
        "VERSION can be:\n"
        " 1, 2, 3, 4, 5, 6, 7, 8, 9, 95, 10, 103 (for Uwabami Breakers), 105, 11, 12, 123, 125, 128, 13, 14, 143, 15, 16, 165 or 17\n"
        /* NEWHU: */
        "Specify 'd' as VERSION to automatically detect archive format. (-l and -x only)\n\n"
        "Report bugs to <" PACKAGE_BUGREPORT ">.\n", argv0);
}

/* Print a thtk error, prefixed with the program name, to stderr. */
static void
print_error(
    thtk_error_t* error)
{
    fprintf(stderr, "%s:%s\n", argv0, thtk_error_message(error));
}

/* An open archive handle together with its backing I/O stream. */
typedef struct {
    thdat_t* thdat;
    thtk_io_t* stream;
} thdat_state_t;

/* Allocate an empty archive state.  Aborts on OOM (fix: the malloc
 * result was previously used unchecked). */
static thdat_state_t*
thdat_state_alloc(void)
{
    thdat_state_t* state = malloc(sizeof(*state));
    if (!state) {
        fprintf(stderr, "%s: out of memory\n", argv0);
        exit(1);
    }
    state->thdat = NULL;
    state->stream = NULL;
    return state;
}

/* Release an archive state, closing the archive and stream if open. */
static void
thdat_state_free(
    thdat_state_t* state)
{
    if (state) {
        if (state->thdat)
            thdat_free(state->thdat);
        if (state->stream)
            thtk_io_close(state->stream);
        free(state);
    }
}

/* Open an existing archive for reading.
 * Returns NULL (with *error set) on failure. */
static thdat_state_t*
thdat_open_file(
    unsigned int version,
    const char* path,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_state_alloc();
    if (!(state->stream = thtk_io_open_file(path, "rb", error))) {
        thdat_state_free(state);
        return NULL;
    }
    if (!(state->thdat = thdat_open(version, state->stream, error))) {
        thdat_state_free(state);
        return NULL;
    }
    return state;
}

/* Extract a single entry to a file of the same name.
 * Returns 1 on success, 0 (with *error set) on failure. */
static int
thdat_extract_file(
    thdat_state_t* state,
    size_t entry_index,
    thtk_error_t** error)
{
    const char* entry_name;
    thtk_io_t* entry_stream;
    if (!(entry_name = thdat_entry_get_name(state->thdat, entry_index, error)))
        return 0;
    // For th105: Make sure that the directory exists
    util_makepath(entry_name);
    if (!(entry_stream = thtk_io_open_file(entry_name, "wb", error)))
        return 0;
    if (thdat_entry_read_data(state->thdat, entry_index, entry_stream,
            error) == -1) {
        thtk_io_close(entry_stream);
        return 0;
    }
    printf("%s\n", entry_name);
    thtk_io_close(entry_stream);
    return 1;
}

/* List the contents of the archive at `path`.  Returns 1 on success. */
static int
thdat_list(
    unsigned int version,
    const char* path,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_open_file(version, path, error);
    if (!state) {
        return 0;
    }
    ssize_t entry_count;
    struct {
        const char* name;
        ssize_t size;
        ssize_t zsize;
    }* entries;
    ssize_t e;
    int name_width = 4;
    if ((entry_count = thdat_entry_count(state->thdat, error)) == -1) {
        thdat_state_free(state);
        return 0;
    }
    entries = malloc(entry_count * sizeof(*entries));
    /* Fix: the malloc result was previously used unchecked. */
    if (!entries) {
        fprintf(stderr, "%s: out of memory\n", argv0);
        thdat_state_free(state);
        exit(1);
    }
#pragma omp parallel /* reduction(max:name_width) */
    {
#pragma omp for
        for (e = 0; e < entry_count; ++e) {
            thtk_error_t* error = NULL;
            entries[e].name = thdat_entry_get_name(state->thdat, e, &error);
            entries[e].size = thdat_entry_get_size(state->thdat, e, &error);
            entries[e].zsize = thdat_entry_get_zsize(state->thdat, e, &error);
            if (!entries[e].name ||
                entries[e].size == -1 ||
                entries[e].zsize == -1) {
                print_error(error);
                thtk_error_free(&error);
                continue;
            }
            int entry_name_width = (int)strlen(entries[e].name);
#pragma omp critical
            if (entry_name_width > name_width)
                name_width = entry_name_width;
        }
    }
    // th105: Stored = Size
    if (version == 105 || version == 123)
        printf("%-*s %7s\n", name_width, "Name", "Size");
    else
        printf("%-*s %7s %7s\n", name_width, "Name", "Size", "Stored");
    for (e = 0; e < entry_count; ++e) {
        /* Fix: entries whose metadata failed to load have a NULL name;
         * previously that NULL was passed to printf("%-*s"). */
        if (!entries[e].name)
            continue;
        if (version == 105 || version == 123)
            printf("%-*s %7zd\n", name_width,
                entries[e].name, entries[e].size);
        else
            printf("%-*s %7zd %7zd\n", name_width,
                entries[e].name, entries[e].size, entries[e].zsize);
    }
    free(entries);
    thdat_state_free(state);
    return 1;
}

/* Create the archive `path` from the files/directories in `paths`.
 * Directories are scanned recursively (util_scan_files); plain paths are
 * added as single entries.  Returns 1 on success, 0 if closing fails;
 * exits on unrecoverable setup errors. */
static int
thdat_create_wrapper(
    unsigned int version,
    const char* path,
    const char** paths,
    size_t entry_count,
    thtk_error_t** error)
{
    thdat_state_t* state = thdat_state_alloc();
    char*** entries = calloc(entry_count, sizeof(char**));
    char** realpaths;
    int* entries_count = calloc(entry_count, sizeof(int));
    size_t real_entry_count = 0;
    /* Fix: the calloc results were previously used unchecked. */
    if (!entries || !entries_count) {
        fprintf(stderr, "%s: out of memory\n", argv0);
        exit(1);
    }
    if (!(state->stream = thtk_io_open_file(path, "wb", error))) {
        /* Fix (per the TODO below main): report the error before exiting. */
        print_error(*error);
        thdat_state_free(state);
        exit(1);
    }
    for (size_t i = 0; i < entry_count; i++) {
        int n = util_scan_files(paths[i], &entries[i]);
        if (n == -1) {
            /* Not a directory: treat the path itself as a single entry. */
            entries[i] = calloc(1, sizeof(char*));
            entries[i][0] = malloc(strlen(paths[i])+1);
            strcpy(entries[i][0], paths[i]);
            n = 1;
        }
        entries_count[i] = n;
        real_entry_count += n;
    }
    if (!(state->thdat = thdat_create(version, state->stream,
            real_entry_count, error))) {
        print_error(*error);
        thdat_state_free(state);
        exit(1);
    }
    // Set entry names first...
    realpaths = calloc(real_entry_count, sizeof(char*));
    if (!realpaths) {
        fprintf(stderr, "%s: out of memory\n", argv0);
        exit(1);
    }
    size_t k = 0;
    for (size_t i = 0; i < entry_count; ++i) {
        thtk_error_t* error = NULL;
        for (int j = 0; j < entries_count[i]; j++) {
            if (!thdat_entry_set_name(state->thdat, k, entries[i][j],
                    &error)) {
                print_error(error);
                thtk_error_free(&error);
                free(entries[i][j]); /* Fix: was leaked on this path. */
                continue;
            }
            realpaths[k] = malloc(strlen(entries[i][j])+1);
            strcpy(realpaths[k], entries[i][j]);
            k++;
            free(entries[i][j]);
        }
        free(entries[i]);
    }
    free(entries);
    free(entries_count);
    // ...and then module->create, if this is th105 archive.
    // This is because the list of entries comes first in th105 archives.
    if (version == 105 || version == 123) {
        if (!thdat_init(state->thdat, error)) {
            print_error(*error);
            thdat_state_free(state);
            exit(1);
        }
    }
    /* TODO: Properly indicate when insertion fails. */
    ssize_t i;
#pragma omp parallel for schedule(dynamic)
    for (i = 0; i < (ssize_t)real_entry_count; ++i) {
        thtk_error_t* error = NULL;
        thtk_io_t* entry_stream;
        off_t entry_size;
        /* Fix: a NULL name (get_name failure) was previously passed
         * straight to printf and then dereferenced. */
        const char* name = thdat_entry_get_name(state->thdat, i, &error);
        if (!name) {
            print_error(error);
            thtk_error_free(&error);
            free(realpaths[i]);
            continue;
        }
        printf("%s...\n", name);
        // Is entry name set?
        if (!name[0]) {
            free(realpaths[i]);
            continue;
        }
        if (!(entry_stream = thtk_io_open_file(realpaths[i], "rb", &error))) {
            print_error(error);
            thtk_error_free(&error);
            free(realpaths[i]); /* Fix: was leaked on this path. */
            continue;
        }
        if ((entry_size = thtk_io_seek(entry_stream, 0, SEEK_END,
                &error)) == -1 ||
            thtk_io_seek(entry_stream, 0, SEEK_SET, &error) == -1 ||
            thdat_entry_write_data(state->thdat, i, entry_stream,
                entry_size, &error) == -1) {
            print_error(error);
            thtk_error_free(&error);
        }
        /* Fix: the stream and the path were leaked on the error paths
         * above; close/free them on every path. */
        thtk_io_close(entry_stream);
        free(realpaths[i]);
    }
    free(realpaths);
    int ret = 1;
    if (!thdat_close(state->thdat, error))
        ret = 0;
    thdat_state_free(state);
    return ret;
}

/* TODO: Make sure errors are printed in all cases. */
int
main(
    int argc,
    char* argv[])
{
    thtk_error_t* error = NULL;
    unsigned int version = 0;
    int mode = -1;
    argv0 = util_shortname(argv[0]);
    int opt;
    int ind = 0;
    while (argv[util_optind]) {
        switch (opt = util_getopt(argc, argv, ":c:l:x:Vd")) {
        case 'c':
        case 'l':
        case 'x':
        case 'd':
            if (mode != -1) {
                fprintf(stderr, "%s: More than one mode specified\n", argv0);
                print_usage();
                exit(1);
            }
            mode = opt;
            /* Fix: the condition previously mixed `opt` and `mode`
             * (equal here, since mode was just assigned opt); use `opt`
             * consistently.  'd' never reaches *util_optarg because it
             * takes no argument and the first two tests fail. */
            if ((opt == 'x' || opt == 'l') && *util_optarg == 'd') {
                version = ~0;
            } else if (opt != 'd')
                version = parse_version(util_optarg);
            break;
        default:
            util_getopt_default(&ind, argv, opt, print_usage);
        }
    }
    argc = ind;
    argv[argc] = NULL;

    /* detect version */
    if (argc && (mode == 'x' || mode == 'l') && version == ~0) {
        thtk_io_t* file;
        if (!(file = thtk_io_open_file(argv[0], "rb", &error))) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        uint32_t out[4];
        unsigned int heur;
        printf("Detecting '%s'...\n", argv[0]);
        if (-1 == thdat_detect(argv[0], file, out, &heur, &error)) {
            thtk_io_close(file);
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        if (heur == -1) {
            const thdat_detect_entry_t* ent;
            printf("Couldn't detect version!\nPossible versions: ");
            while ((ent = thdat_detect_iter(out))) {
                printf("%d,", ent->alias);
            }
            printf("\n");
            thtk_io_close(file);
            exit(1);
        } else {
            printf("Detected version %d\n", heur);
            version = heur;
        }
        thtk_io_close(file);
    }

    switch (mode) {
    case 'd': {
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        thtk_io_t* file;
        if (!(file = thtk_io_open_file(argv[0], "rb", &error))) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        uint32_t out[4];
        unsigned int heur;
        printf("Detecting '%s'... ", argv[0]);
        if (-1 == thdat_detect(argv[0], file, out, &heur, &error)) {
            printf("\n");
            thtk_io_close(file);
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        const thdat_detect_entry_t* ent;
        printf("%d | possible versions: ", heur);
        while ((ent = thdat_detect_iter(out))) {
            printf("%d,", ent->alias);
        }
        printf(" | filename: %d\n", thdat_detect_filename(argv[0]));
        thtk_io_close(file);
        exit(0);
    }
    case 'l': {
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        if (!thdat_list(version, argv[0], &error)) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        exit(0);
    }
    case 'c': {
        if (argc < 2) {
            print_usage();
            exit(1);
        }
        if (!thdat_create_wrapper(version, argv[0], (const char**)&argv[1],
                argc - 1, &error)) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        exit(0);
    }
    case 'x': {
        if (argc < 1) {
            print_usage();
            exit(1);
        }
        thdat_state_t* state = thdat_open_file(version, argv[0], &error);
        if (!state) {
            print_error(error);
            thtk_error_free(&error);
            exit(1);
        }
        if (argc > 1) {
            /* Extract only the entries named on the command line. */
            ssize_t a;
#pragma omp parallel for schedule(dynamic)
            for (a = 1; a < argc; ++a) {
                thtk_error_t* error = NULL;
                int entry_index;
                if ((entry_index = thdat_entry_by_name(state->thdat,
                        argv[a], &error)) == -1) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
                if (!thdat_extract_file(state, entry_index, &error)) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
            }
        } else {
            /* No names given: extract everything. */
            ssize_t entry_count;
            if ((entry_count = thdat_entry_count(state->thdat,
                    &error)) == -1) {
                print_error(error);
                thtk_error_free(&error);
                exit(1);
            }
            ssize_t entry_index;
#pragma omp parallel for schedule(dynamic)
            for (entry_index = 0; entry_index < entry_count; ++entry_index) {
                thtk_error_t* error = NULL;
                if (!thdat_extract_file(state, entry_index, &error)) {
                    print_error(error);
                    thtk_error_free(&error);
                    continue;
                }
            }
        }
        thdat_state_free(state);
        exit(0);
    }
    default:
        print_usage();
        exit(1);
    }
}
Interp1PrimFirstOrderUpwind.c
/*! @file Interp1PrimFirstOrderUpwind.c * @brief First order upwind Scheme (Component-wise application to vectors). * @author Debojyoti Ghosh */ #include <stdio.h> #include <stdlib.h> #include <basic.h> #include <arrayfunctions.h> #include <interpolation.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 1 /*! @brief 1st order upwind reconstruction (component-wise) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the 1st order upwind scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 1st order upwind numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as: \f{equation}{ \hat{\bf f}_{j+1/2} = \left\{\begin{array}{cc} {\bf f}_{j} & {\rm upw} > 0 \\ {\bf f}_{j+1} & {\rm upw} \le 0 \end{array}\right.. \f} \b Implementation \b Notes: + The scalar interpolation method is applied to the vector function in a component-wise manner. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. 
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. 
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). */ int Interp1PrimFirstOrderUpwind( double *fI, /*!< Array of interpolated function values at the interfaces */ double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int upw, /*!< Upwind direction (left or right biased) */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m, /*!< Object of type #MPIVariables containing MPI-related variables */ int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */ ) { HyPar *solver = (HyPar*) s; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; int N_outer; 
_ArrayProduct1D_(bounds_outer,ndims,N_outer); int i; #pragma omp parallel for schedule(auto) default(shared) private(i,index_outer,indexC,indexI) for (i=0; i<N_outer; i++) { _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { indexC[dir] = (upw > 0 ? indexI[dir]-1 : indexI[dir]); int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0 ,p); int q; _ArrayIndex1D_(ndims,dim ,indexC,ghosts,q); int v; for (v=0; v<nvars; v++) fI[p*nvars+v] = fC[q*nvars+v]; } } return(0); }
lowDimBD2.c
/*
 * lowDimBD2.c -- bit-parallel computation of a band-depth-style statistic
 * (presumably modified band depth BD2 -- confirm against the R wrapper) for
 * n functions sampled at m columns, callable from R via .C().
 *
 * For each function f, count[f] counts pairs of functions forming a "band"
 * around f; depth[f] = count[f] / (n*(n-1)/2).  Per sample, functions above /
 * equal / below f are maintained as 64-bit masks and pairs are counted with
 * POPCNT.  Tick marks (prefix masks every 128 ranks) avoid rebuilding the
 * masks from scratch for every function.
 */
#include <R.h>
#include <stdio.h>       /* fprintf (do not rely on <R.h> pulling it in) */
#include <stdlib.h>      /* malloc/free/qsort/exit */
#include <stdint.h>
#include <x86intrin.h>   /* _mm_popcnt_u64 */
#include <omp.h>

#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))

/* R stores matrices column-major: element (i,j) of an n x m matrix. */
#define Data(i,j)   data[(j) * n + (i)]
#define Sorted(i,j) sorted[(j) * n + (i)]
#define Idx(i,j)    idx[(j) * n + (i)]
#define Rank(i,j)   rank[(j) * n + (i)]
#define Ticks(t,j)  ticks[(j) * numTick + (t)]

#define MaxSample 64                 /* at most 64 samples (columns) */
#define TICK 7                       /* tick spacing = 2^TICK ranks */
#define OneTick 128                  /* 2^TICK functions per tick */
#define WORD 6                       /* uint64_t holds 2^WORD bits */
#define OneWord 64
#define MASK 0x0000003F              /* six 1's: bit offset within a word */
#define ONES 0xFFFFFFFFFFFFFFFFull   /* 64 1's */

struct node { double val; unsigned idx; };
typedef struct node node;

static node *toSort;                 /* scratch buffer for per-column sorts */
static unsigned n, m, numThread, *idx, *rank, numTick, vecLen;
static uint64_t *count;              /* per-function pair counts */
static double *sorted;               /* values sorted (descending) per column */
static uint64_t **ticks;             /* numTick * m prefix masks */
static unsigned threeAs;             /* 3^m: number of {<,=,>}^m band types */
static uint64_t ***ltVec, ***eqVec, ***gtVec;  /* per-thread, per-sample masks */
static int8_t **first, **second;     /* per-thread band-type digit buffers */
static uint64_t oneBit[64];          /* oneBit[o] = 1ull << o */

/* qsort comparator: descending by value. */
static int cmpNode(const void *a, const void *b)
{
    double x = ((const node *) a)->val;
    double y = ((const node *) b)->val;
    if (x > y) return -1;
    if (x < y) return 1;
    return 0;
}

/* Sort each column descending; fill Idx (rank -> function), Sorted
   (rank -> value) and Rank (function -> rank). */
static void sortFunc(double *data)
{
    unsigned i, j;
    for (j = 0; j < m; j++) {
        for (i = 0; i < n; i++) {
            toSort[i].val = Data(i, j);
            toSort[i].idx = i;
        }
        qsort((void *) toSort, n, sizeof(node), cmpNode);
        for (i = 0; i < n; i++) {
            Idx(i, j) = toSort[i].idx;      /* which function has rank i at sample j */
            Sorted(i, j) = toSort[i].val;   /* function value at rank i, sample j */
            Rank(toSort[i].idx, j) = i;     /* rank of function i at sample j */
        }
    }
}

/* Build tick masks: Ticks(t,j) has a bit set for every function whose rank
   at sample j is < t*OneTick (i.e., whose value is among the t*128 largest). */
static void initTick(void)
{
    unsigned i, j, k, t, id, w, o;
    for (j = 0; j < m; j++) {
        for (k = 0; k < vecLen; k++) Ticks(0, j)[k] = 0;
        i = 0;
        for (t = 1; t < numTick; t++) {
            for (k = 0; k < vecLen; k++) Ticks(t, j)[k] = Ticks(t - 1, j)[k];
            while (i < t * OneTick) {
                id = Idx(i, j), w = id >> WORD, o = id & MASK;
                Ticks(t, j)[w] |= oneBit[o];
                i++;
            }
        }
    }
}

/* Count the band pairs for the current function, using the lt/eq/gt masks
   already prepared in slot tid by calcDepth().
   NOTE: must be *static* inline -- a plain C99/C11 "inline" definition emits
   no external symbol for this TU, so any non-inlined call fails to link. */
static inline uint64_t countCount(unsigned tid)
{
    unsigned i, j, k, o, r, s, numAbove, numBelow, numEqual;
    uint64_t cnt, above, below, equal;

    /* Functions equal to f at every sample pair with anything (and each other). */
    numEqual = 0;
    for (k = 0; k < vecLen; k++) {
        equal = ONES;
        for (s = 0; s < m; s++) equal &= eqVec[tid][s][k];
        numEqual += _mm_popcnt_u64(equal);
    }
    /* widen before multiplying: 32-bit products overflow for large n */
    cnt = (uint64_t)(n - numEqual) * numEqual
        + (uint64_t)numEqual * (numEqual - 1) / 2;

    /* Enumerate ordered pairs of band types.  A band type is a base-3 number:
       digit s in {0,1,2} maps to {=, <, >} at sample s (encoded 0, -1, 1). */
    for (i = 1; i < threeAs; i++) {
        o = i;
        for (s = 0; s < m; s++) {
            r = o % 3, o = o / 3;
            first[tid][s] = (r == 0) ? 0 : (r == 1) ? -1 : 1;
        }
        for (j = i + 1; j < threeAs; j++) {
            o = j;
            for (s = 0; s < m; s++) {
                r = o % 3, o = o / 3;
                second[tid][s] = (r == 0) ? 0 : (r == 1) ? -1 : 1;
            }
            /* -1, 0, 1 for <, =, >.  A properly formed band never has both
               members on the same strict side: first[s]*second[s] <= 0. */
            for (s = 0; s < m; s++)
                if (first[tid][s] * second[tid][s] == 1) break;
            if (s < m) continue;

            numAbove = numBelow = 0;
            for (k = 0; k < vecLen; k++) {
                above = below = ONES;
                for (s = 0; s < m; s++) {
                    above &= (first[tid][s] == -1) ? ltVec[tid][s][k]
                           : (first[tid][s] == 0)  ? eqVec[tid][s][k]
                                                   : gtVec[tid][s][k];
                    below &= (second[tid][s] == -1) ? ltVec[tid][s][k]
                           : (second[tid][s] == 0)  ? eqVec[tid][s][k]
                                                    : gtVec[tid][s][k];
                }
                numAbove += _mm_popcnt_u64(above);
                numBelow += _mm_popcnt_u64(below);
            }
            cnt += (uint64_t)numAbove * numBelow;  /* widen: avoid 32-bit overflow */
        }
    }
    return cnt;
}

/* For every function f (in parallel), build per-sample gt/eq/lt masks and
   count the bands containing f.
   Abbreviations: f func, r rank, s sample, t tick, w word, o offset. */
static void calcDepth(void)
{
    unsigned f, k, r, s, t, id, w, o, threadID;
    int i;
#pragma omp parallel for private(i,k,r,s,t,id,w,o,threadID)
    for (f = 0; f < n; f++) {
        threadID = omp_get_thread_num();
        for (s = 0; s < m; s++) {
            r = Rank(f, s);
            t = r >> TICK;  /* last tick at or below rank r */
            /* start from the tick prefix, then extend to everything >= f's value */
            for (k = 0; k < vecLen; k++) gtVec[threadID][s][k] = Ticks(t, s)[k];
            i = t << TICK;
            while (i < n && Sorted(i, s) >= Sorted(r, s)) {
                id = Idx(i, s), w = id >> WORD, o = id & MASK;
                gtVec[threadID][s][w] |= oneBit[o];
                i++;
            }
            /* everything not >= is <; ties get added back below */
            for (k = 0; k < vecLen; k++) ltVec[threadID][s][k] = ~ gtVec[threadID][s][k];
            while ((i - 1) >= 0 && Sorted(i - 1, s) <= Sorted(r, s)) {
                i--;
                id = Idx(i, s), w = id >> WORD, o = id & MASK;
                ltVec[threadID][s][w] |= oneBit[o];
            }
            /* split the overlap out as "equal", leaving gt/lt strict */
            for (k = 0; k < vecLen; k++) {
                eqVec[threadID][s][k] = gtVec[threadID][s][k] & ltVec[threadID][s][k];
                gtVec[threadID][s][k] &= ~ eqVec[threadID][s][k];
                ltVec[threadID][s][k] &= ~ eqVec[threadID][s][k];
            }
            /* clear padding bits (id >= n) that ~gtVec turned on in ltVec */
            for (id = n; id < (vecLen << WORD); id++) {
                w = id >> WORD, o = id & MASK;
                ltVec[threadID][s][w] &= ~ oneBit[o];
            }
        }
        count[f] = countCount(threadID);
    }
}

/*
 * Entry point for R's .C() interface.
 *   row, col : dimensions of the data matrix (n x m, column-major)
 *   data     : function values, Data(i,j) = value of function i at sample j
 *   depth    : output, length n; depth[i] = count[i] / (n*(n-1)/2)
 * Requires n > OneTick and m <= MaxSample.
 */
void lowDimBD2(int *row, int *col, double *data, double *depth)
{
    unsigned i, j;
    n = *row;
    m = *col;
    if (n <= OneTick) {
        fprintf(stderr, "minimum %u rows\n", OneTick + 1);
        exit(1);
    }
    if (m > MaxSample) {
        fprintf(stderr, "maximum %u columns\n", MaxSample);
        exit(1);
    }
    numThread = omp_get_max_threads();
    omp_set_num_threads(numThread);

    count = (uint64_t *) malloc(sizeof(uint64_t) * n);
    toSort = (node *) malloc(sizeof(node) * n);
    sorted = (double *) malloc(sizeof(double) * n * m);
    idx = (unsigned *) malloc(sizeof(unsigned) * n * m);
    rank = (unsigned *) malloc(sizeof(unsigned) * n * m);

    oneBit[0] = 1;
    for (i = 1; i < 64; i++) oneBit[i] = oneBit[i - 1] << 1;
    for (i = 0, threeAs = 1; i < m; i++) threeAs *= 3;  /* 3^m band types */

    /* one tick mark every 128 functions */
    numTick = (n >> TICK) + 1;
    ticks = (uint64_t **) malloc(sizeof(uint64_t *) * numTick * m);
    /* number of 64-bit words per mask */
    vecLen = (n >> WORD) + 1;
    for (i = 0; i < numTick * m; i++)
        ticks[i] = (uint64_t *) malloc(sizeof(uint64_t) * vecLen);

    gtVec = (uint64_t ***) malloc(sizeof(uint64_t **) * numThread);
    ltVec = (uint64_t ***) malloc(sizeof(uint64_t **) * numThread);
    eqVec = (uint64_t ***) malloc(sizeof(uint64_t **) * numThread);
    for (i = 0; i < numThread; i++) {
        gtVec[i] = (uint64_t **) malloc(sizeof(uint64_t *) * m);
        ltVec[i] = (uint64_t **) malloc(sizeof(uint64_t *) * m);
        eqVec[i] = (uint64_t **) malloc(sizeof(uint64_t *) * m);
        for (j = 0; j < m; j++) {
            gtVec[i][j] = (uint64_t *) malloc(sizeof(uint64_t) * vecLen);
            ltVec[i][j] = (uint64_t *) malloc(sizeof(uint64_t) * vecLen);
            eqVec[i][j] = (uint64_t *) malloc(sizeof(uint64_t) * vecLen);
        }
    }
    first = (int8_t **) malloc(sizeof(int8_t *) * numThread);
    second = (int8_t **) malloc(sizeof(int8_t *) * numThread);
    for (i = 0; i < numThread; i++) {
        first[i] = (int8_t *) malloc(sizeof(int8_t) * MaxSample);
        second[i] = (int8_t *) malloc(sizeof(int8_t) * MaxSample);
    }

    sortFunc(data);
    initTick();
    calcDepth();

    /* normalize by the number of function pairs, C(n,2) */
    for (i = 0; i < n; i++)
        depth[i] = (double) count[i] / (n * (n - 1.0) / 2.0);

    free(count);
    free(toSort);
    free(sorted);
    free(idx);
    free(rank);
    for (i = 0; i < numTick * m; i++) free(ticks[i]);
    free(ticks);
    for (i = 0; i < numThread; i++) {
        for (j = 0; j < m; j++) {
            free(gtVec[i][j]);
            free(ltVec[i][j]);
            free(eqVec[i][j]);
        }
        free(gtVec[i]);
        free(ltVec[i]);
        free(eqVec[i]);
    }
    free(gtVec);
    free(ltVec);
    free(eqVec);
    for (i = 0; i < numThread; i++) {
        free(first[i]);
        free(second[i]);
    }
    free(first);
    free(second);
}
nodal_two_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< 
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node<3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ NodalTwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { InitializeStrategy(rSolverConfig); } NodalTwoStepVPStrategy(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input? 
mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); /* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */ pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new 
ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. virtual ~NodalTwoStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", ""); if (BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. 
Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize()); if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize()); // const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); // for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl) // { // ierr = itEl->Check(rCurrentProcessInfo); // if (ierr != 0) // break; // } const auto &r_current_process_info = rModelPart.GetProcessInfo(); for (const auto &r_element : rModelPart.Elements()) { ierr = r_element.Check(r_current_process_info); if (ierr != 0) { break; } } // for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) // { // ierr = itCond->Check(rCurrentProcessInfo); // if (ierr != 0) // break; // } return ierr; KRATOS_CATCH(""); } bool SolveSolutionStep() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; bool converged = false; // bool momentumAlreadyConverged=false; // bool continuityAlreadyConverged=false; unsigned int maxNonLinearIterations = mMaxPressureIter; KRATOS_INFO("\n Solve with nodally_integrated_two_step_vp strategy at t=") << currentTime << "s" << std::endl; if (timeIntervalChanged == true && currentTime > 10 * timeInterval) { 
maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; double pressureNorm = 0; double velocityNorm = 0; /* boost::timer solve_step_time; */ this->InitializeSolutionStep(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; if (it == 0) { this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); } this->CalcNodalStrainsAndStresses(); momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); this->CalcNodalStrains(); if (fixedTimeStep == false) { continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm); } // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // momentumAlreadyConverged=true; // } // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // continuityAlreadyConverged=true; // } 
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1)) { //this->ComputeErrorL2NormCaseImposedG(); //this->ComputeErrorL2NormCasePoiseuille(); this->CalculateAccelerations(); // std::ofstream myfile; // myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } if ((continuityConverged && momentumConverged) && it > 1) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); converged = true; std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return converged; } void FinalizeSolutionStep() override { /* this->UpdateStressStrain(); */ } void Initialize() override { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size(); unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = 
ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) { rNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) { rSpatialDefRate.resize(sizeStrains, false); } noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) { rFgrad.resize(dimension, dimension, false); } noalias(rFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) { rFgradVel.resize(dimension, dimension, false); } noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... 
" << itNode->X() << " " << itNode->Y() << std::endl; } this->AssignFluidMaterialToEachNode(itNode); } // } } void UnactiveSliverElements() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); MesherUtilities MesherUtils; double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart); double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size()); double ElementalVolume = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); if (numNodes == (dimension + 1)) { if (dimension == 2) { ElementalVolume = (itElem)->GetGeometry().Area(); } else if (dimension == 3) { ElementalVolume = (itElem)->GetGeometry().Volume(); } if (ElementalVolume < CriticalVolume) { // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl; (itElem)->Set(ACTIVE, false); } else { (itElem)->Set(ACTIVE, true); } } } } KRATOS_CATCH(""); } void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff; } void ComputeNodalVolume() { ModelPart &rModelPart = 
BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; } } // } } void InitializeSolutionStep() override { this->FillNodalSFDVector(); } void FillNodalSFDVector() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // { for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { InitializeNodalVariablesForRemeshedDomain(itNode); SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER } } void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType 
&neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 becausealso the node itself must be considered as nieghbor node Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodeOrderedNeighbours.size() != neighbourNodes) rNodeOrderedNeighbours.resize(neighbourNodes, false); noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes); rNodeOrderedNeighbours[0] = itNode->Id(); if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id(); } } } void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) rNodalStress.resize(sizeStrains, false); noalias(rNodalStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalDevStress.size() != sizeStrains) rNodalDevStress.resize(sizeStrains, false); noalias(rNodalDevStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER)) { Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodalSFDneighboursOrder.size() != neighbourNodes) rNodalSFDneighboursOrder.resize(neighbourNodes, false); noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes); } if 
(itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) rNodalSFDneighbours.resize(sizeSDFNeigh, false); noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) rSpatialDefRate.resize(sizeStrains, false); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) rFgrad.resize(dimension, dimension, false); noalias(rFgrad) = ZeroMatrix(dimension, dimension); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) rFgradVel.resize(dimension, dimension, false); noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE)) { itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE)) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; } } void InitializeNonLinearIterations() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #ifdef _OPENMP int number_of_threads 
= omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { itElem->InitializeNonLinearIteration(rCurrentProcessInfo); } // } } void CalcNodalStrainsAndStresses() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double theta = 0.5; if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode, theta); this->CalcNodalStrainsAndStressesForNode(itNode); } else { // if nodalVolume==0 InitializeNodalVariablesForRemeshedDomain(itNode); } } // } } void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); double detFgrad = 1.0; Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); if (dimension == 2) { 
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }

    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);

    if (dimension == 2)
    {
        // Symmetric part of L in Voigt order [exx, eyy, exy].
        auto &r_stain_tensor2D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));

        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate, then regularised apparent viscosity.
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }

        // Volumetric deformation rate = trace of the strain-rate tensor.
        double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                        itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];

        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;

        // Total Cauchy stress: volumetric part + 2*mu*strain-rate.
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

        // Deviatoric Cauchy stress (trace removed from the normal components).
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

        auto &r_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor2D[0] = nodalSigmaTot_xx;
        r_stress_tensor2D[1] = nodalSigmaTot_yy;
        r_stress_tensor2D[2] = nodalSigmaTot_xy;

        auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // Voigt order [exx, eyy, ezz, exy, exz, eyz].
        auto &r_stain_tensor3D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2);
        r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));

        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }

        double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                        itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                        itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;

        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];

        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];

        auto &r_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor3D[0] = nodalSigmaTot_xx;
        r_stress_tensor3D[1] = nodalSigmaTot_yy;
        r_stress_tensor3D[2] = nodalSigmaTot_zz;
        r_stress_tensor3D[3] = nodalSigmaTot_xy;
        r_stress_tensor3D[4] = nodalSigmaTot_xz;
        r_stress_tensor3D[5] = nodalSigmaTot_yz;

        auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
        r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
        r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
        r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
    }
}

/// Computes only the nodal strain-rate quantities (no stresses) from the
/// stored nodal deformation gradient and its rate.
void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode)
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    // double detFgrad=1.0;
    // Matrix InvFgrad=ZeroMatrix(dimension,dimension);
    // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);

    double detFgrad = 1.0;
    Matrix nodalFgrad = ZeroMatrix(dimension, dimension);
    Matrix FgradVel = ZeroMatrix(dimension, dimension);
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);

    nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);

    //Inverse
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
    }

    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);

    if (dimension == 2)
    {
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));

        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                  2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                  4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));

        double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];

        double DefVol = DefX + DefY;

        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));

        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);

        double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double DefZ = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

        double DefVol = DefX + DefY + DefZ;

        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
}

/// Loops all nodes and recomputes strain-rate quantities with theta = 1.0
/// (end-of-step velocities only); remeshed nodes (zero volume) are
/// reinitialised.
void CalcNodalStrains()
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();

    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
        double theta = 1.0;
        if (nodalVolume > 0)
        {
            this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
            this->CalcNodalStrainsForNode(itNode);
        }
        else
        { // if nodalVolume==0
            InitializeNodalVariablesForRemeshedDomain(itNode);
        }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

/// Assembles the nodal deformation gradient F and its rate dF/dt from the
/// stored shape-function derivatives of the node itself and of its
/// neighbours; nodal velocities are blended between the current and previous
/// step with the given theta. Results are written back to
/// NODAL_DEFORMATION_GRAD and NODAL_DEFORMATION_GRAD_VEL.
void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta)
{

    KRATOS_TRY;

    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    Matrix Fgrad = ZeroMatrix(dimension, dimension);
    Matrix FgradVel = ZeroMatrix(dimension, dimension);
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);

    if (dimension == 2)
    {
        // Contribution of the node itself (first pair of derivatives).
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];

        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();

        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;

        unsigned int firstRow = 2;

        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId because this has also the considered node ID at the beginning
            {
                dNdXi = rNodalSFDneigh[firstRow];
                dNdYi = rNodalSFDneigh[firstRow + 1];
                // Consistency check between the stored neighbour ordering and
                // the NEIGHBOUR_NODES container.
                unsigned int neigh_nodes_id = neighb_nodes[i].Id();
                unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                if (neigh_nodes_id != other_neigh_nodes_id)
                {
                    std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
                }
                Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();

                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

                FgradVel(0, 0) += dNdXi * VelocityX;
                FgradVel(0, 1) += dNdYi * VelocityX;
                FgradVel(1, 0) += dNdXi * VelocityY;
                FgradVel(1, 1) += dNdYi * VelocityY;

                firstRow += 2;
            }
        }
    }
    else
    {
        // 3D assembly: three derivatives per node, 3x3 gradients.
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];
        double dNdZi = rNodalSFDneigh[2];

        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
        double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(0, 2) += dNdZi * itNode->X();

        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();
        Fgrad(1, 2) += dNdZi * itNode->Y();

        Fgrad(2, 0) += dNdXi * itNode->Z();
        Fgrad(2, 1) += dNdYi * itNode->Z();
        Fgrad(2, 2) += dNdZi * itNode->Z();

        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(0, 2) += dNdZi * VelocityX;

        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;
        FgradVel(1, 2) += dNdZi * VelocityY;

        FgradVel(2, 0) += dNdXi * VelocityZ;
        FgradVel(2, 1) += dNdYi * VelocityZ;
        FgradVel(2, 2) += dNdZi * VelocityZ;

        unsigned int firstRow = 3;

        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++)
            {
                dNdXi = rNodalSFDneigh[firstRow];
                dNdYi = rNodalSFDneigh[firstRow + 1];
                dNdZi = rNodalSFDneigh[firstRow + 2];

                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
                VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

                Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();

                Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
                Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();

                Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
                Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
                Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();

                FgradVel(0, 0) += dNdXi * VelocityX;
                FgradVel(0, 1) += dNdYi * VelocityX;
                FgradVel(0, 2) += dNdZi * VelocityX;

                FgradVel(1, 0) += dNdXi * VelocityY;
                FgradVel(1, 1) += dNdYi * VelocityY;
                FgradVel(1, 2) += dNdZi * VelocityY;

                FgradVel(2, 0) += dNdXi * VelocityZ;
                FgradVel(2, 1) += dNdYi * VelocityZ;
                FgradVel(2, 2) += dNdZi * VelocityZ;

                firstRow += 3;
            }
        }
    }

    itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad;
    itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
    KRATOS_CATCH("");
}

void UpdateTopology(ModelPart &rModelPart,
unsigned int echoLevel)
{
    KRATOS_TRY;

    // Move the mesh with the newly computed displacements, then refresh the
    // weighted boundary normals on the deformed configuration.
    /* this->CalculateDisplacements(); */
    this->CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    BoundaryNormalsCalculationUtilities BoundaryComputation;
    BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);

    KRATOS_CATCH("");
}

/// First-order backward difference of the nodal pressure.
/// NOTE(review): no guard against DELTA_TIME == 0 — presumably the time step
/// is always positive here; confirm against the driver.
void CalculatePressureVelocity()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    unsigned int timeStep = rCurrentProcessInfo[STEP];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        if (timeStep == 1)
        {
            // First step: no history yet, zero both buffer entries.
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
        }
        else
        {
            double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
            double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
            double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
            CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
        }
    }
}

/// First-order backward difference of the nodal pressure velocity.
void CalculatePressureAcceleration()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    unsigned int timeStep = rCurrentProcessInfo[STEP];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        if (timeStep == 1)
        {
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
        }
        else
        {
            double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
            double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
            double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
            CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
        }
    }
}

/// Updates nodal accelerations from the BDF velocity history. Rigid (non
/// solid) nodes get zero acceleration; isolated nodes have their nodal
/// variables cleared and, when a volume acceleration is stored, are advanced
/// ballistically with it.
void CalculateAccelerations()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
        array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);

        if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
        {
            UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
        }
        else if ((i)->Is(RIGID))
        {
            array_1d<double, 3> Zeros(3, 0.0);
            (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
            (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
        }
        else
        {
            // Isolated node: wipe all nodal quantities so a later remesh
            // starts clean.
            (i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0;
            (i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0;
            (i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
            (i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0;
            (i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
            if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
            {
                // Free-flying node: apply gravity-like volume acceleration.
                array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
                (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
            }
        }
    }
}

/// BDF2 acceleration update: a_n+1 = -c1*(v_n+1 - v_n) - a_n.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity,
                                Vector &BDFcoeffs)
{
    /* noalias(PreviousAcceleration)=CurrentAcceleration; */
    noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;

    // std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
    // std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
    // std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}

/// Trapezoidal (midpoint-velocity) displacement update for all nodes.
void CalculateDisplacements()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];

    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

        /* if( i->IsFixed(DISPLACEMENT_X) == false ) */
        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];

        /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];

        /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
        CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
    }
}

/// Same trapezoidal displacement update, fused with the zeroing of all
/// per-node assembled quantities so the next assembly starts from scratch.
void CalculateDisplacementsAndResetNodalVariables()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    unsigned int sizeStrains = 3 * (dimension - 1);

    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
        if (dimension == 3)
        {
            CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
        }

        ///// reset Nodal variables //////
        Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
        // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
        // unsigned int sizeSDFNeigh=neighbourNodes*dimension;

        i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
        i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
        i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
        i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
        i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;

        noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);

        Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        noalias(rSpatialDefRate) = ZeroVector(sizeStrains);

        Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        noalias(rFgrad) = ZeroMatrix(dimension, dimension);

        Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
    }
    // }
}

/// Recomputes accelerations and the pressure time derivatives in sequence.
void UpdatePressureAccelerations()
{
    this->CalculateAccelerations();
    this->CalculatePressureVelocity();
    this->CalculatePressureAcceleration();
}

/// Clears both linear strategies (system matrices and vectors).
void Clear() override
{
    mpMomentumStrategy->Clear();
    mpPressureStrategy->Clear();
}

///@}
///@name Access
///@{

/// Sets the verbosity; the inner strategies get one level less.
void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
    int StrategyLevel = Level > 0 ? Level - 1 : 0;
    mpMomentumStrategy->SetEchoLevel(StrategyLevel);
    mpPressureStrategy->SetEchoLevel(StrategyLevel);
}

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "NodalTwoStepVPStrategy";
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategy";
}

/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/// Calculate the coefficients for time iteration.
/**
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
 */
/// Fills BDF_COEFFICIENTS for the configured time order (variable-step BDF2
/// or backward Euler for mTimeOrder == 1).
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;

    if (mTimeOrder == 2)
    {
        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);

        BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
    }

    KRATOS_CATCH("");
}

/// One nonlinear momentum iteration: solves the fractional-step velocity
/// system and evaluates the relative velocity error against velocityNorm.
/// NOTE(review): velocityNorm is only refreshed when it == 0 — presumably the
/// caller keeps the it==0 value alive across iterations via the reference
/// parameter; confirm the first call of each step uses it == 0.
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedMomentum = false;
    double NormDv = 0;
    fixedTimeStep = false;
    // build momentum system and solve for fractional step velocity increment
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);

    if (it == 0)
    {
        mpMomentumStrategy->InitializeSolutionStep();
        /* this->SetNeighboursVelocityId(); */
    }

    NormDv = mpMomentumStrategy->Solve();

    if (BaseType::GetEchoLevel() > 1 && Rank == 0)
        std::cout << "-------------- s o l v e d ! ------------------" << std::endl;

    if (it == 0)
    {
        velocityNorm = this->ComputeVelocityNorm();
    }
    double DvErrorNorm = NormDv / velocityNorm;

    // double DvErrorNorm = 0;
    // ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
    unsigned int iterationForCheck = 3;
    KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << std::endl;

    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("Iteration") << it << "  Final Velocity error: " << DvErrorNorm << std::endl;
        fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
    }
    else if (it > iterationForCheck)
    {
        fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
    }
    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
    // 	myfile << it << "\t" << DvErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
    // 	myfile << it << "\t" << DvErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
    // 	myfile << it << "\t" << DvErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
    // 	myfile << it << "\t" << DvErrorNorm << "\n";
    //  myfile.close();
    // }

    if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;

    return ConvergedMomentum;
}

/// One continuity (pressure) iteration of the fractional-step scheme.
/// NOTE(review): NormP is only refreshed when it == 0 — relies on the caller
/// preserving the reference across iterations, mirroring velocityNorm above.
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedContinuity = false;
    double NormDp = 0;

    // 2. Pressure solution
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);

    if (it == 0)
    {
        mpPressureStrategy->InitializeSolutionStep();
    }

    NormDp = mpPressureStrategy->Solve();

    if (BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "The norm of pressure is: " << NormDp << std::endl;

    if (it == 0)
    {
        NormP = this->ComputePressureNorm();
    }

    double DpErrorNorm = NormDp / (NormP);

    // double DpErrorNorm = 0;
    // ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);

    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("Iteration") << it << "  Final Pressure error: " << DpErrorNorm << std::endl;
        ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
    }
    else
    {
        KRATOS_INFO("Iteration") << it << "  Pressure error: " << DpErrorNorm << std::endl;
    }

    // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
    // double currentTime = rCurrentProcessInfo[TIME];
    // double tolerance=0.0000000001;
    // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
    // 	myfile << it << "\t" << DpErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
    // 	myfile << it << "\t" << DpErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
    // 	myfile << it << "\t" << DpErrorNorm << "\n";
    //  myfile.close();
    // }
    // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
    // 	std::ofstream myfile;
    //  myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
    // 	myfile << it << "\t" << DpErrorNorm << "\n";
    //  myfile.close();
    // }

    if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;

    return ConvergedContinuity;
}

/// Computes the relative velocity-increment error NormDv/||v|| and compares
/// it with mVelocityTolerance.
/// NOTE(review): the local NormVelNode accumulator is never read — dead code
/// kept for token fidelity; candidate for removal.
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();

    double NormV = 0.00;
    errorNormDv = 0;

#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

            double NormVelNode = 0;

            for (unsigned int d = 0; d < 3; ++d)
            {
                NormVelNode += Vel[d] * Vel[d];
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);

    NormV = sqrt(NormV);

    if (NormV == 0.0)
        NormV = 1.00;

    errorNormDv = NormDv / NormV;

    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    /* else{ */
    /*   std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
    /* } */

    if (errorNormDv < mVelocityTolerance)
    {
        return true;
    }
    else
    {
        return false;
    }
}

/// Global L2 norm of the velocity field (returns 1.0 for an all-zero field
/// so callers can divide by it safely).
double ComputeVelocityNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();

    double NormV = 0.00;

#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

            // NOTE(review): NormVelNode is accumulated but never used.
            double NormVelNode = 0;

            for (unsigned int d = 0; d < 3; ++d)
            {
                NormVelNode += Vel[d] * Vel[d];
                NormV += Vel[d] * Vel[d];
            }
        }
    }

    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);

    NormV = sqrt(NormV);

    if (NormV == 0.0)
        NormV = 1.00;

    return NormV;
}

/// Global L2 norm of the pressure field (1.0 when the field is all zero).
double ComputePressureNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();

    double NormP = 0.00;

#pragma omp parallel reduction(+ \
                               : NormP)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
            NormP += Pr * Pr;
        }
    }

    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);

    NormP = sqrt(NormP);

    if (NormP == 0.0)
        NormP = 1.00;

    return NormP;
}

/// Accumulates L2 error norms against a manufactured/analytical solution
/// (case with imposed g). Definition continues past this chunk.
void ComputeErrorL2NormCaseImposedG()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];

    double sumErrorL2Velocity = 0;
    double sumErrorL2VelocityX = 0;
    double sumErrorL2VelocityY = 0;
    double sumErrorL2Pressure = 0;
    double sumErrorL2TauXX = 0;
    double sumErrorL2TauYY = 0;
    double sumErrorL2TauXY = 0;

#pragma omp parallel
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double posX = itNode->X();
            const double posY = itNode->Y();
            const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
            const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
            const double pressure =
itNode->FastGetSolutionStepValue(PRESSURE); const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0]; const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1]; const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2]; double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3)); double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3)); double expectedPressure = -posX * (1.0 - posX); double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2))); double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY)); double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2)); double nodalErrorVelocityX = velX - expectedVelocityX; double nodalErrorVelocityY = velY - expectedVelocityY; double nodalErrorPressure = pressure - expectedPressure; double nodalErrorTauXX = tauXX - expectedTauXX; double nodalErrorTauYY = tauYY - expectedTauYY; double nodalErrorTauXY = tauXY - expectedTauXY; sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea; sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea; sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea; sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea; sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea; sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea; sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea; // itNode->FastGetSolutionStepValue(NODAL_ERROR_XX)=nodalErrorTauXX; } } double errorL2Velocity = sqrt(sumErrorL2Velocity); double errorL2VelocityX = 
sqrt(sumErrorL2VelocityX); double errorL2VelocityY = sqrt(sumErrorL2VelocityY); double errorL2Pressure = sqrt(sumErrorL2Pressure); double errorL2TauXX = sqrt(sumErrorL2TauXX); double errorL2TauYY = sqrt(sumErrorL2TauYY); double errorL2TauXY = sqrt(sumErrorL2TauXY); std::ofstream myfileVelocity; myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n"; myfileVelocity.close(); std::ofstream myfileVelocityX; myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app); myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n"; myfileVelocityX.close(); std::ofstream myfileVelocityY; myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app); myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n"; myfileVelocityY.close(); std::ofstream myfilePressure; myfilePressure.open("errorL2PressureFile.txt", std::ios::app); myfilePressure << currentTime << "\t" << errorL2Pressure << "\n"; myfilePressure.close(); std::ofstream myfileTauXX; myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app); myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n"; myfileTauXX.close(); std::ofstream myfileTauYY; myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app); myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n"; myfileTauYY.close(); std::ofstream myfileTauXY; myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app); myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n"; myfileTauXY.close(); } void ComputeErrorL2NormCasePoiseuille() { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; double sumErrorL2VelocityTheta = 0; double sumErrorL2TauTheta = 0; double r_in = 0.2; double R_out = 0.5; double kappa = r_in / R_out; double omega = 0.5; double viscosity = 100.0; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; 
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double posX = itNode->X(); const double posY = itNode->Y(); const double rPos = sqrt(pow(posX, 2) + pow(posY, 2)); const double cosalfa = posX / rPos; const double sinalfa = posY / rPos; const double sin2alfa = 2.0 * cosalfa * sinalfa; const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2); const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X); const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y); const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0]; const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1]; const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2]; double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out); double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2)); double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta; double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2); double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa; double nodalErrorTauTheta = computedTauTheta - expectedTauTheta; itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta; // if(posY>-0.01 && posY<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } // if(posX>-0.01 && posX<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX 
"<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea; sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea; } } double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta); double errorL2TauTheta = sqrt(sumErrorL2TauTheta); std::ofstream myfileVelocity; myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n"; myfileVelocity.close(); } bool CheckPressureConvergence(const double NormDp, double &errorNormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormP = 0.00; errorNormDp = 0; // #pragma omp parallel reduction(+:NormP) // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } // } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; errorNormDp = NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " Pressure error: " << errorNormDp << std::endl; } /* else{ */ /* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */ /* } */ if (errorNormDp < mPressureTolerance) { return true; } else return false; } bool FixTimeStepMomentum(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double 
timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.005; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl; minTolerance = 0.05; if (DvErrorNorm > minTolerance) { std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl; fixedTimeStep = true; // #pragma omp parallel // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } // } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool CheckMomentumConvergence(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.99999; bool fixedTimeStep = false; if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE 
LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl; std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool FixTimeStepContinuity(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.01; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { fixedTimeStep = true; rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true); } else { rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); } return fixedTimeStep; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} // private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mReformDofSet; // Fractional step index. 
/* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ void InitializeStrategy(SolverSettingsType &rSolverConfig) { KRATOS_TRY; mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. this->Check(); //ModelPart& rModelPart = this->GetModelPart(); mDomainSize = rSolverConfig.GetDomainSize(); mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance); /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */ } else { KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", ""); } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter); } else { KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", ""); } // Check input parameters this->Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. 
NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) {} /// Copy constructor. NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {} ///@} }; /// Class NodalTwoStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
MetaballComponent.h
#pragma once #include "ECS.h" #include "TransformComponent.h" #include "MarchingCubeComponent.h" #include "glm/gtx/string_cast.hpp" #include <cstdint> #include <functional> class MetaballComponent : public Component { public: MetaballComponent(MarchingCubeComponent* __marchingCubeComponent, float __radius) : Component{} , _marchingCubeComponent { __marchingCubeComponent } , _radius { __radius } {} void update([[maybe_unused]] double __deltaTime) override { // Testings (frame rate dependent) t += 0.015f; if (entity->getEntityID() == 13) { auto& transformComponent = entity->getComponent<TransformComponent>(); transformComponent.setPosition({0.0f, cos(t)*2.5f, sin(t)*2.5f}); } else if (entity->getEntityID() == 14) { auto& transformComponent = entity->getComponent<TransformComponent>(); transformComponent.setPosition({0.0f, cos(t)*2.5f, -sin(t)*2.5f}); } else { auto& transformComponent = entity->getComponent<TransformComponent>(); transformComponent.setPosition({0.0f, sin(-t)*2.5f, 0.0f}); } // End Testings ASSERT(entity->hasComponent<TransformComponent>(), "entity should have a TransformComponent"); auto pos = entity->getComponent<TransformComponent>().position(); auto& grid = _marchingCubeComponent->grid(); std::function<float(glm::vec3)> func = std::bind(&MetaballComponent::f, pos, _radius, std::placeholders::_1); _marchingCubeComponent->addFunc(func); //#pragma omp parallel for for (std::uint64_t x = 0; x < grid.size(); ++x) { for (std::uint64_t y = 0; y < grid[x].size(); ++y) { for (std::uint64_t z = 0; z < grid[x][y].size(); ++z) { // loop cell points for (std::uint8_t i = 0; i < 8; ++i) { _marchingCubeComponent->changeGrid(x, y, z, i, func(grid[x][y][z].points[i])); } } } } } static inline float f(glm::vec3 __center, float __radius, glm::vec3 __pos) { float d = glm::dot(__center - __pos, __center - __pos); if (d == 0.0f) return 0.0f; return std::pow(__radius, 2)/d; } private: MarchingCubeComponent* _marchingCubeComponent; float _radius; double t = 0.0f; };
GB_unaryop__ainv_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_uint64_fp32
// op(A') function: GB_tran__ainv_uint64_fp32

// C type: uint64_t
// A type: float
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = -aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// (additive inverse; on uint64_t the negation wraps modulo 2^64, which is
// well-defined for unsigned arithmetic)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_fp32
(
    uint64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, independent per entry: safe static OpenMP partition
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmp_atomic.c
/* * kmp_atomic.c -- ATOMIC implementation routines * $Revision: 43421 $ * $Date: 2014-08-28 08:56:10 -0500 (Thu, 28 Aug 2014) $ */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// #include "kmp_atomic.h" #include "kmp.h" // TRUE, asm routines prototypes typedef unsigned char uchar; typedef unsigned short ushort; /*! @defgroup ATOMIC_OPS Atomic Operations These functions are used for implementing the many different varieties of atomic operations. The compiler is at liberty to inline atomic operations that are naturally supported by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined @code static int s = 0; #pragma omp atomic s++; @endcode using the single instruction: `lock; incl s` However the runtime does provide entrypoints for these operations to support compilers that choose not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the increment above.) The names of the functions are encoded by using the data type name and the operation name, as in these tables. Data Type | Data type encoding -----------|--------------- int8_t | `fixed1` uint8_t | `fixed1u` int16_t | `fixed2` uint16_t | `fixed2u` int32_t | `fixed4` uint32_t | `fixed4u` int32_t | `fixed8` uint32_t | `fixed8u` float | `float4` double | `float8` float 10 (8087 eighty bit float) | `float10` complex<float> | `cmplx4` complex<double> | `cmplx8` complex<float10> | `cmplx10` <br> Operation | Operation encoding ----------|------------------- + | add - | sub \* | mul / | div & | andb << | shl \>\> | shr \| | orb ^ | xor && | andl \|\| | orl maximum | max minimum | min .eqv. | eqv .neqv. 
| neqv <br> For non-commutative operations, `_rev` can also be added for the reversed operation. For the functions that capture the result, the suffix `_cpt` is added. Update Functions ================ The general form of an atomic function that just performs an update (without a `capture`) @code void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand `capture` functions =================== The capture functions perform an atomic update and return a result, which is either the value before the capture, or that after. They take an additional argument to determine which result is returned. Their general form is therefore @code TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand @param flag one if the result is to be captured *after* the operation, zero if captured *before*. The one set of exceptions to this is the `complex<float>` type where the value is not returned, rather an extra argument pointer is passed. They look like @code void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag ); @endcode Read and Write Operations ========================= The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the value is read or written atomically, with no modification performed. In many cases on IA-32 architecture these operations can be inlined since the architecture guarantees that no tearing occurs on aligned objects accessed with a single memory operation of up to 64 bits in size. 
The general form of the read operations is @code TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc ); @endcode For the write operations the form is @code void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode Full list of functions ====================== This leads to the generation of 376 atomic functions, as follows. Functons for integers --------------------- There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters). @code __kmpc_atomic_fixed1_add __kmpc_atomic_fixed1_add_cpt __kmpc_atomic_fixed1_add_fp __kmpc_atomic_fixed1_andb __kmpc_atomic_fixed1_andb_cpt __kmpc_atomic_fixed1_andl __kmpc_atomic_fixed1_andl_cpt __kmpc_atomic_fixed1_div __kmpc_atomic_fixed1_div_cpt __kmpc_atomic_fixed1_div_cpt_rev __kmpc_atomic_fixed1_div_float8 __kmpc_atomic_fixed1_div_fp __kmpc_atomic_fixed1_div_rev __kmpc_atomic_fixed1_eqv __kmpc_atomic_fixed1_eqv_cpt __kmpc_atomic_fixed1_max __kmpc_atomic_fixed1_max_cpt __kmpc_atomic_fixed1_min __kmpc_atomic_fixed1_min_cpt __kmpc_atomic_fixed1_mul __kmpc_atomic_fixed1_mul_cpt __kmpc_atomic_fixed1_mul_float8 __kmpc_atomic_fixed1_mul_fp __kmpc_atomic_fixed1_neqv __kmpc_atomic_fixed1_neqv_cpt __kmpc_atomic_fixed1_orb __kmpc_atomic_fixed1_orb_cpt __kmpc_atomic_fixed1_orl __kmpc_atomic_fixed1_orl_cpt __kmpc_atomic_fixed1_rd __kmpc_atomic_fixed1_shl __kmpc_atomic_fixed1_shl_cpt __kmpc_atomic_fixed1_shl_cpt_rev __kmpc_atomic_fixed1_shl_rev __kmpc_atomic_fixed1_shr __kmpc_atomic_fixed1_shr_cpt __kmpc_atomic_fixed1_shr_cpt_rev __kmpc_atomic_fixed1_shr_rev __kmpc_atomic_fixed1_sub __kmpc_atomic_fixed1_sub_cpt __kmpc_atomic_fixed1_sub_cpt_rev __kmpc_atomic_fixed1_sub_fp __kmpc_atomic_fixed1_sub_rev __kmpc_atomic_fixed1_swp __kmpc_atomic_fixed1_wr __kmpc_atomic_fixed1_xor __kmpc_atomic_fixed1_xor_cpt __kmpc_atomic_fixed1u_div __kmpc_atomic_fixed1u_div_cpt __kmpc_atomic_fixed1u_div_cpt_rev __kmpc_atomic_fixed1u_div_fp __kmpc_atomic_fixed1u_div_rev 
__kmpc_atomic_fixed1u_shr __kmpc_atomic_fixed1u_shr_cpt __kmpc_atomic_fixed1u_shr_cpt_rev __kmpc_atomic_fixed1u_shr_rev __kmpc_atomic_fixed2_add __kmpc_atomic_fixed2_add_cpt __kmpc_atomic_fixed2_add_fp __kmpc_atomic_fixed2_andb __kmpc_atomic_fixed2_andb_cpt __kmpc_atomic_fixed2_andl __kmpc_atomic_fixed2_andl_cpt __kmpc_atomic_fixed2_div __kmpc_atomic_fixed2_div_cpt __kmpc_atomic_fixed2_div_cpt_rev __kmpc_atomic_fixed2_div_float8 __kmpc_atomic_fixed2_div_fp __kmpc_atomic_fixed2_div_rev __kmpc_atomic_fixed2_eqv __kmpc_atomic_fixed2_eqv_cpt __kmpc_atomic_fixed2_max __kmpc_atomic_fixed2_max_cpt __kmpc_atomic_fixed2_min __kmpc_atomic_fixed2_min_cpt __kmpc_atomic_fixed2_mul __kmpc_atomic_fixed2_mul_cpt __kmpc_atomic_fixed2_mul_float8 __kmpc_atomic_fixed2_mul_fp __kmpc_atomic_fixed2_neqv __kmpc_atomic_fixed2_neqv_cpt __kmpc_atomic_fixed2_orb __kmpc_atomic_fixed2_orb_cpt __kmpc_atomic_fixed2_orl __kmpc_atomic_fixed2_orl_cpt __kmpc_atomic_fixed2_rd __kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb __kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev 
__kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt __kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt __kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv 
__kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev __kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev __kmpc_atomic_fixed8_swp __kmpc_atomic_fixed8_wr __kmpc_atomic_fixed8_xor __kmpc_atomic_fixed8_xor_cpt __kmpc_atomic_fixed8u_div __kmpc_atomic_fixed8u_div_cpt __kmpc_atomic_fixed8u_div_cpt_rev __kmpc_atomic_fixed8u_div_fp __kmpc_atomic_fixed8u_div_rev __kmpc_atomic_fixed8u_shr __kmpc_atomic_fixed8u_shr_cpt __kmpc_atomic_fixed8u_shr_cpt_rev __kmpc_atomic_fixed8u_shr_rev @endcode Functions for floating point ---------------------------- There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes. (Ten byte floats are used by X87, but are now rare). 
@code __kmpc_atomic_float4_add __kmpc_atomic_float4_add_cpt __kmpc_atomic_float4_add_float8 __kmpc_atomic_float4_add_fp __kmpc_atomic_float4_div __kmpc_atomic_float4_div_cpt __kmpc_atomic_float4_div_cpt_rev __kmpc_atomic_float4_div_float8 __kmpc_atomic_float4_div_fp __kmpc_atomic_float4_div_rev __kmpc_atomic_float4_max __kmpc_atomic_float4_max_cpt __kmpc_atomic_float4_min __kmpc_atomic_float4_min_cpt __kmpc_atomic_float4_mul __kmpc_atomic_float4_mul_cpt __kmpc_atomic_float4_mul_float8 __kmpc_atomic_float4_mul_fp __kmpc_atomic_float4_rd __kmpc_atomic_float4_sub __kmpc_atomic_float4_sub_cpt __kmpc_atomic_float4_sub_cpt_rev __kmpc_atomic_float4_sub_float8 __kmpc_atomic_float4_sub_fp __kmpc_atomic_float4_sub_rev __kmpc_atomic_float4_swp __kmpc_atomic_float4_wr __kmpc_atomic_float8_add __kmpc_atomic_float8_add_cpt __kmpc_atomic_float8_add_fp __kmpc_atomic_float8_div __kmpc_atomic_float8_div_cpt __kmpc_atomic_float8_div_cpt_rev __kmpc_atomic_float8_div_fp __kmpc_atomic_float8_div_rev __kmpc_atomic_float8_max __kmpc_atomic_float8_max_cpt __kmpc_atomic_float8_min __kmpc_atomic_float8_min_cpt __kmpc_atomic_float8_mul __kmpc_atomic_float8_mul_cpt __kmpc_atomic_float8_mul_fp __kmpc_atomic_float8_rd __kmpc_atomic_float8_sub __kmpc_atomic_float8_sub_cpt __kmpc_atomic_float8_sub_cpt_rev __kmpc_atomic_float8_sub_fp __kmpc_atomic_float8_sub_rev __kmpc_atomic_float8_swp __kmpc_atomic_float8_wr __kmpc_atomic_float10_add __kmpc_atomic_float10_add_cpt __kmpc_atomic_float10_add_fp __kmpc_atomic_float10_div __kmpc_atomic_float10_div_cpt __kmpc_atomic_float10_div_cpt_rev __kmpc_atomic_float10_div_fp __kmpc_atomic_float10_div_rev __kmpc_atomic_float10_mul __kmpc_atomic_float10_mul_cpt __kmpc_atomic_float10_mul_fp __kmpc_atomic_float10_rd __kmpc_atomic_float10_sub __kmpc_atomic_float10_sub_cpt __kmpc_atomic_float10_sub_cpt_rev __kmpc_atomic_float10_sub_fp __kmpc_atomic_float10_sub_rev __kmpc_atomic_float10_swp __kmpc_atomic_float10_wr __kmpc_atomic_float16_add __kmpc_atomic_float16_add_cpt 
__kmpc_atomic_float16_div
__kmpc_atomic_float16_div_cpt
__kmpc_atomic_float16_div_cpt_rev
__kmpc_atomic_float16_div_rev
__kmpc_atomic_float16_max
__kmpc_atomic_float16_max_cpt
__kmpc_atomic_float16_min
__kmpc_atomic_float16_min_cpt
__kmpc_atomic_float16_mul
__kmpc_atomic_float16_mul_cpt
__kmpc_atomic_float16_rd
__kmpc_atomic_float16_sub
__kmpc_atomic_float16_sub_cpt
__kmpc_atomic_float16_sub_cpt_rev
__kmpc_atomic_float16_sub_rev
__kmpc_atomic_float16_swp
__kmpc_atomic_float16_wr
@endcode

Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
The names here are based on the size of the component float, *not* the size of the complex type. So
`__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`.

@code
__kmpc_atomic_cmplx4_add
__kmpc_atomic_cmplx4_add_cmplx8
__kmpc_atomic_cmplx4_add_cpt
__kmpc_atomic_cmplx4_div
__kmpc_atomic_cmplx4_div_cmplx8
__kmpc_atomic_cmplx4_div_cpt
__kmpc_atomic_cmplx4_div_cpt_rev
__kmpc_atomic_cmplx4_div_rev
__kmpc_atomic_cmplx4_mul
__kmpc_atomic_cmplx4_mul_cmplx8
__kmpc_atomic_cmplx4_mul_cpt
__kmpc_atomic_cmplx4_rd
__kmpc_atomic_cmplx4_sub
__kmpc_atomic_cmplx4_sub_cmplx8
__kmpc_atomic_cmplx4_sub_cpt
__kmpc_atomic_cmplx4_sub_cpt_rev
__kmpc_atomic_cmplx4_sub_rev
__kmpc_atomic_cmplx4_swp
__kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add
__kmpc_atomic_cmplx8_add_cpt
__kmpc_atomic_cmplx8_div
__kmpc_atomic_cmplx8_div_cpt
__kmpc_atomic_cmplx8_div_cpt_rev
__kmpc_atomic_cmplx8_div_rev
__kmpc_atomic_cmplx8_mul
__kmpc_atomic_cmplx8_mul_cpt
__kmpc_atomic_cmplx8_rd
__kmpc_atomic_cmplx8_sub
__kmpc_atomic_cmplx8_sub_cpt
__kmpc_atomic_cmplx8_sub_cpt_rev
__kmpc_atomic_cmplx8_sub_rev
__kmpc_atomic_cmplx8_swp
__kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_add_cpt
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_div_cpt
__kmpc_atomic_cmplx10_div_cpt_rev
__kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_mul __kmpc_atomic_cmplx10_mul_cpt __kmpc_atomic_cmplx10_rd __kmpc_atomic_cmplx10_sub __kmpc_atomic_cmplx10_sub_cpt __kmpc_atomic_cmplx10_sub_cpt_rev __kmpc_atomic_cmplx10_sub_rev __kmpc_atomic_cmplx10_swp __kmpc_atomic_cmplx10_wr __kmpc_atomic_cmplx16_add __kmpc_atomic_cmplx16_add_cpt __kmpc_atomic_cmplx16_div __kmpc_atomic_cmplx16_div_cpt __kmpc_atomic_cmplx16_div_cpt_rev __kmpc_atomic_cmplx16_div_rev __kmpc_atomic_cmplx16_mul __kmpc_atomic_cmplx16_mul_cpt __kmpc_atomic_cmplx16_rd __kmpc_atomic_cmplx16_sub __kmpc_atomic_cmplx16_sub_cpt __kmpc_atomic_cmplx16_sub_cpt_rev __kmpc_atomic_cmplx16_swp __kmpc_atomic_cmplx16_wr @endcode */ /*! @ingroup ATOMIC_OPS @{ */ /* * Global vars */ #ifndef KMP_GOMP_COMPAT int __kmp_atomic_mode = 1; // Intel perf #else int __kmp_atomic_mode = 2; // GOMP compatibility #endif /* KMP_GOMP_COMPAT */ KMP_ALIGN(128) kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */ kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */ kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */ kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */ 
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/ kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/ kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */ /* 2007-03-02: Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a bug on *_32 and *_32e. This is just a temporary workaround for the problem. It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG routines in assembler language. */ #define KMP_ATOMIC_VOLATILE volatile #if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; }; static 
inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; }; #endif /* ------------------------------------------------------------------------ */ /* ATOMIC implementation routines */ /* one routine for each operation and operand type */ /* ------------------------------------------------------------------------ */ // All routines declarations looks like // void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs ); // ------------------------------------------------------------------------ #define KMP_CHECK_GTID \ if ( gtid == KMP_GTID_UNKNOWN ) { \ gtid = __kmp_entry_gtid(); \ } // check and get gtid when needed // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ // Lock variables used for critical sections for various size operands #define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat #define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char #define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short #define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int #define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float #define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int #define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double #define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex #define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double #define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad #define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex #define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex #define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex // ------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) OP (rhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ // For GNU compatibility, we may need to use a critical section, // even though it is not required by the ISA. 
// // On IA-32 architecture, all atomic operations except for fixed 4 byte add, // sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common // critical section. On Intel(R) 64, all atomic operations are done with fetch // and add or compare and exchange. Therefore, the FLAG parameter to this // macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which // require a critical section, where we predict that they will be implemented // in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()). // // When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct, // the FLAG parameter should always be 1. If we know that we will be using // a critical section, then we want to make certain that we use the generic // lock __kmp_atomic_lock to protect the atomic update, and not of of the // locks that are specialized based upon the size or type of the data. // // If FLAG is 0, then we are relying on dead code elimination by the build // compiler to get rid of the useless block of code, and save a needless // branch at runtime. // #ifdef KMP_GOMP_COMPAT # define OP_GOMP_CRITICAL(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL( OP, 0 ); \ return; \ } # else # define OP_GOMP_CRITICAL(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ #if KMP_MIC # define KMP_DO_PAUSE _mm_delay_32( 1 ) #else # define KMP_DO_PAUSE KMP_CPU_PAUSE() #endif /* KMP_MIC */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator #define OP_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE old_value, new_value; \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ } \ } #if USE_CMPXCHG_FIX // 2007-06-25: // workaround for C78287 (complex(kind=4) data type) // lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm) // Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro. // This is a problem of the compiler. // Related tracker is C76005, targeted to 11.0. // I verified the asm of the workaround. #define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ { \ char anonym[ ( sizeof( TYPE ) == sizeof( kmp_int##BITS ) ) ? ( 1 ) : ( 0 ) ] = { 1 }; \ struct _sss { \ TYPE cmp; \ kmp_int##BITS *vvv; \ }; \ struct _sss old_value, new_value; \ old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \ new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \ { \ KMP_DO_PAUSE; \ \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ } \ } // end of the first part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #endif #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // end of the second part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // Routines for ATOMIC 4-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub // Routines for ATOMIC 8-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_sub ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // MASK - used for alignment check // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // 
__kmpc_atomic_fixed1_xor ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_andb ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG /* ------------------------------------------------------------------------ */ /* Routines for C/C++ Reduction operators && and || */ /* ------------------------------------------------------------------------ */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment // TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used #define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CRITICAL( = *lhs OP, LCK_ID ) \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment 
problems =================================== #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl /* ------------------------------------------------------------------------- */ /* Routines for Fortran operators that matched no one in C: */ /* MAX, MIN, .EQV., .NEQV. */ /* Operators .AND., .OR. 
are covered by __kmpc_atomic_*_{andl,orl} */ /* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ /* ------------------------------------------------------------------------- */ // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ *lhs = rhs; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT( OP, 0 ); \ return; \ } #else #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value; \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { /* need actions? 
*/ \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT(OP,LCK_ID) \ } \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ } \ } #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ } \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // 
MIN_MAX_COMPXCHG( float8,  max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG( float8,  min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 )  // __kmpc_atomic_float8_min
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL( float16, max,     QUAD_LEGACY,      <, 16r,   1 )       // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL( float16, min,     QUAD_LEGACY,      >, 16r,   1 )       // __kmpc_atomic_float16_min
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned _Quad flavours (IA-32 only)
    MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t,     <, 16r,   1 )     // __kmpc_atomic_float16_max_a16
    MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t,     >, 16r,   1 )     // __kmpc_atomic_float16_min_a16
#endif
#endif
// ------------------------------------------------------------------------
// Need separate macros for .EQV. because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
// (Fortran .EQV. is implemented as XOR followed by bitwise complement,
//  hence the fixed "^=~" operator text passed to the critical-section path.)
#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \
}
// NOTE(review): ATOMIC_CRIT_EQV is defined but not invoked in this region;
// the .EQV. entry points below use ATOMIC_CMPX_EQV instead — confirm it is
// used elsewhere before removing.

// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// Falls back to a critical section when lhs is not naturally aligned
// (MASK selects the low address bits that must be zero for alignment).
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// .NEQV. (xor) and .EQV. (xor-complement) entry points for 1/2/4/8-byte ints
ATOMIC_CMPXCHG(  fixed1, neqv, kmp_int8,   8,   ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG(  fixed2, neqv, kmp_int16, 16,   ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG(  fixed4, neqv, kmp_int32, 32,   ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG(  fixed8, neqv, kmp_int64, 64,   ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV( fixed1, eqv,  kmp_int8,   8,  ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV( fixed2, eqv,  kmp_int16, 16,  ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV( fixed4, eqv,  kmp_int32, 32,  ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV( fixed8, eqv,  kmp_int64, 64,  ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL(OP##=,LCK_ID)         /* send assignment */ \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add
ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add
ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned _Quad flavours (IA-32 only)
    ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16
    ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16
    ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16
    ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16
#endif
#endif
// routines for complex types

#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// cmplx4 fits in 64 bits, so a lock-free 64-bit compare-exchange can be used
// instead of a critical section.
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL( cmplx4,  add, kmp_cmplx32,  +,  8c, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL( cmplx4,  sub, kmp_cmplx32,  -,  8c, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL( cmplx4,  mul, kmp_cmplx32,  *,  8c, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4,  div, kmp_cmplx32,  /,  8c, 1 ) // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX

ATOMIC_CRITICAL( cmplx8,  add, kmp_cmplx64,  +, 16c, 1 ) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8,  sub, kmp_cmplx64,  -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8,  mul, kmp_cmplx64,  *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8,  div, kmp_cmplx64,  /, 16c, 1 ) // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80,  +, 20c, 1 ) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80,  -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80,  *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80,  /, 20c, 1 ) // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG,  +, 32c, 1 ) // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG,  -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG,  *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG,  /, 32c, 1 ) // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned complex-quad flavours (IA-32 only)
    ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16
    ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16
    ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16
    ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "Reverse" form: the operand order is (rhs OP *lhs), for non-commutative
// operators appearing as  x = expr binop x  in the user program.
#define OP_CRITICAL_REV(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*lhs) = (rhs) OP (*lhs); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

#ifdef KMP_GOMP_COMPAT
// GOMP (libgomp) compatibility: when running in GOMP atomic mode, take the
// single global atomic lock (lock id 0) instead of the per-type lock.
#define OP_GOMP_CRITICAL_REV(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_REV( OP, 0 ); \
        return; \
    }
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_REV(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs OP old_value; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_DO_PAUSE; \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs OP old_value; \
        } \
    }
// NOTE(review): this loop uses KMP_DO_PAUSE while the forward-form
// OP_CMPXCHG loops use KMP_CPU_PAUSE — confirm the distinction is deliberate.

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CMPXCHG_REV(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------
// Entries definition for integer operands
//     TYPE_ID - operands type and size (fixed4, float4)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operand type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator (used in critical section)
//     LCK_ID  - lock identifier, used to possibly distinguish lock variable
//               TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
//                  TYPE_ID, OP_ID, TYPE,       BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV( fixed1,  div, kmp_int8,    8,  /,  1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8,   8,  /,  1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV( fixed1,  shl, kmp_int8,    8,  <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV( fixed1,  shr, kmp_int8,    8,  >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8,   8,  >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV( fixed1,  sub, kmp_int8,    8,  -,  1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev

ATOMIC_CMPXCHG_REV( fixed2,  div, kmp_int16,  16,  /,  2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16,  /,  2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV( fixed2,  shl, kmp_int16,  16,  <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV( fixed2,  shr, kmp_int16,  16,  >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16,  >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV( fixed2,  sub, kmp_int16,  16,  -,  2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev

ATOMIC_CMPXCHG_REV( fixed4,  div, kmp_int32,  32,  /,  4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32,  /,  4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV( fixed4,  shl, kmp_int32,  32,  <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV( fixed4,  shr, kmp_int32,  32,  >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32,  >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV( fixed4,  sub, kmp_int32,  32,  -,  4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev

ATOMIC_CMPXCHG_REV( fixed8,  div, kmp_int64,  64,  /,  8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64,  /,  8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV( fixed8,  shl, kmp_int64,  64,  <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV( fixed8,  shr, kmp_int64,  64,  >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64,  >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV( fixed8,  sub, kmp_int64,  64,  -,  8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev

ATOMIC_CMPXCHG_REV( float4,  div, kmp_real32, 32,  /,  4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV( float4,  sub, kmp_real32, 32,  -,  4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev
ATOMIC_CMPXCHG_REV( float8,  div, kmp_real64, 64,  /,  8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV( float8,  sub, kmp_real64, 64,  -,  8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev
//                  TYPE_ID, OP_ID, TYPE,      BITS,OP,LCK_ID, GOMP_FLAG

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
    OP_CRITICAL_REV(OP,LCK_ID) \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev
ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev
ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned _Quad flavours (IA-32 only)
    ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev
    ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev
#endif
#endif

// routines for complex types
ATOMIC_CRITICAL_REV( cmplx4,  sub, kmp_cmplx32, -, 8c,  1 ) // __kmpc_atomic_cmplx4_sub_rev
ATOMIC_CRITICAL_REV( cmplx4,  div, kmp_cmplx32, /, 8c,  1 ) // __kmpc_atomic_cmplx4_div_rev
ATOMIC_CRITICAL_REV( cmplx8,  sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev
ATOMIC_CRITICAL_REV( cmplx8,  div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev
ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev
ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev
ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev
    ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev
#endif
#endif

#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
// End of OpenMP 4.0: x = expr binop x for non-commutative operations.

#endif //OMP_40_ENABLED

/* ------------------------------------------------------------------------ */
/* Routines for mixed types of LHS and RHS, when RHS is "larger"            */
/* Note: in order to reduce the total number of types combinations          */
/*       it is supposed that compiler converts RHS to longest floating type,*/
/*       that is _Quad, before call to any of these routines                */
/* Conversion to _Quad will be done by the compiler during calculation,     */
/*    conversion back to TYPE - before the assignment, like:                */
/*        *lhs = (TYPE)( (_Quad)(*lhs) OP rhs )                             */
/* Performance penalty expected because of SW emulation use                 */
/* ------------------------------------------------------------------------ */

#define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid ));

// -------------------------------------------------------------------------
#define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL(OP##=,LCK_ID)         /* send assignment */ \
}

// -------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// RHS=float8
ATOMIC_CMPXCHG_MIX( fixed1, char,       mul,  8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8
ATOMIC_CMPXCHG_MIX( fixed1, char,       div,  8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8
ATOMIC_CMPXCHG_MIX( fixed2, short,      mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8
ATOMIC_CMPXCHG_MIX( fixed2, short,      div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32,  mul, 32, *, float8, kmp_real64, 4i, 3, 0 )            // __kmpc_atomic_fixed4_mul_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32,  div, 32, /, float8, kmp_real64, 4i, 3, 0 )            // __kmpc_atomic_fixed4_div_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64,  mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64,  div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8

// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them)
#if KMP_HAVE_QUAD
ATOMIC_CMPXCHG_MIX( fixed1,  char,      add,  8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp
ATOMIC_CMPXCHG_MIX( fixed1,  char,      sub,  8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp
ATOMIC_CMPXCHG_MIX( fixed1,  char,      mul,  8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp
ATOMIC_CMPXCHG_MIX( fixed1,  char,      div,  8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp
ATOMIC_CMPXCHG_MIX( fixed1u, uchar,     div,  8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp

ATOMIC_CMPXCHG_MIX( fixed2,  short,     add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp
ATOMIC_CMPXCHG_MIX( fixed2,  short,     sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp
ATOMIC_CMPXCHG_MIX( fixed2,  short,     mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp
ATOMIC_CMPXCHG_MIX( fixed2,  short,     div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp
ATOMIC_CMPXCHG_MIX( fixed2u, ushort,    div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp

ATOMIC_CMPXCHG_MIX( fixed4,  kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 )            // __kmpc_atomic_fixed4_add_fp
ATOMIC_CMPXCHG_MIX( fixed4,  kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 )            // __kmpc_atomic_fixed4_sub_fp
ATOMIC_CMPXCHG_MIX( fixed4,  kmp_int32,  mul, 32, *, fp, _Quad, 4i, 3, 0 )            // __kmpc_atomic_fixed4_mul_fp
ATOMIC_CMPXCHG_MIX( fixed4,  kmp_int32,  div, 32, /, fp, _Quad, 4i, 3, 0 )            // __kmpc_atomic_fixed4_div_fp
ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 )            // __kmpc_atomic_fixed4u_div_fp

ATOMIC_CMPXCHG_MIX( fixed8,  kmp_int64,  add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp
ATOMIC_CMPXCHG_MIX( fixed8,  kmp_int64,  sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp
ATOMIC_CMPXCHG_MIX( fixed8,  kmp_int64,  mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp
ATOMIC_CMPXCHG_MIX( fixed8,  kmp_int64,  div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp
ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp

ATOMIC_CMPXCHG_MIX( float4,  kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp
ATOMIC_CMPXCHG_MIX( float4,  kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp
ATOMIC_CMPXCHG_MIX( float4,  kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp
ATOMIC_CMPXCHG_MIX( float4,  kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp

ATOMIC_CMPXCHG_MIX( float8,  kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp
ATOMIC_CMPXCHG_MIX( float8,  kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp
ATOMIC_CMPXCHG_MIX( float8,  kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp
ATOMIC_CMPXCHG_MIX( float8,  kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp

// long double LHS with _Quad RHS must always go through the critical section
// (no lock-free path for 80-bit extended precision).
ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp
ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp
ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// cmplx4 LHS with cmplx8 RHS: lock-free 64-bit compare-exchange on the
// 8-byte complex(4) object instead of a critical section.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#else
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    OP_CMPXCHG(TYPE,BITS,OP) \
}
#endif // USE_CMPXCHG_FIX
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
    OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
    if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
        OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
    } else { \
        KMP_CHECK_GTID; \
        OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
    } \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8

// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

//////////////////////////////////////////////////////////////////////////////////////////////////////

// ------------------------------------------------------------------------
// Atomic READ routines
// ------------------------------------------------------------------------

// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operands' type
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
// (the CAS swaps the value with itself: a read, not a modification; the
//  f_val/i_val union reinterprets the FP bits as an integer for the CAS).
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        union f_i_union { \
            TYPE f_val; \
            kmp_int##BITS i_val; \
        }; \
        union f_i_union old_value; \
        temp_val = *loc; \
        old_value.f_val = temp_val; \
        old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
        new_value = old_value.f_val; \
        return new_value; \
    }

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_READ(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    new_value = (*loc); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_READ( OP, 0 ); \
        return new_value; \
    }
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// Integer read: implemented as an atomic fetch-and-add of zero
// (OP 0 expands to +0 or -0), which returns the current value.
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
    new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \
    return new_value; \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
    OP_CMPXCHG_READ(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
    TYPE new_value; \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_READ(OP,LCK_ID)            /* send assignment */ \
    return new_value; \
}

// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS )

// Windows-only variant: cmplx4 cannot be returned by value (CQ220361),
// so the result is written through an extra "out" pointer parameter.
#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*out) = (*loc); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_READ_WRK( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
    OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \
    OP_CRITICAL_READ_WRK(OP,LCK_ID)            /* send assignment */ \
}

#endif // KMP_OS_WINDOWS

// ------------------------------------------------------------------------
//                  TYPE_ID, OP_ID, TYPE,      OP, GOMP_FLAG
ATOMIC_FIXED_READ( fixed4, rd, kmp_int32,  32, +, 0 )            // __kmpc_atomic_fixed4_rd
ATOMIC_FIXED_READ( fixed8, rd, kmp_int64,  64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd
ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd
ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd

// !!! TODO: Remove lock operations for "char" since it can't be non-atomic
ATOMIC_CMPXCHG_READ( fixed1,  rd, kmp_int8,    8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd
ATOMIC_CMPXCHG_READ( fixed2,  rd, kmp_int16,  16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd

ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 )         // __kmpc_atomic_float10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 )         // __kmpc_atomic_float16_rd
#endif // KMP_HAVE_QUAD

// Fix for CQ220361 on Windows* OS
#if ( KMP_OS_WINDOWS )
    ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +,  8c, 1 )  // __kmpc_atomic_cmplx4_rd
#else
    ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +,  8c, 1 )      // __kmpc_atomic_cmplx4_rd
#endif
ATOMIC_CRITICAL_READ( cmplx8,  rd, kmp_cmplx64, +, 16c, 1 )         // __kmpc_atomic_cmplx8_rd
ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 )         // __kmpc_atomic_cmplx10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 )         // __kmpc_atomic_cmplx16_rd
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned flavours (IA-32 only)
    ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 )          // __kmpc_atomic_float16_a16_rd
    ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 )  // __kmpc_atomic_cmplx16_a16_rd
#endif
#endif

// ------------------------------------------------------------------------
// Atomic WRITE routines
// ------------------------------------------------------------------------

// Integer write: a plain atomic exchange (old value discarded).
#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    KMP_XCHG_FIXED##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    KMP_XCHG_REAL##BITS( lhs, rhs ); \
}

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_WR(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = rhs; \
        } \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
    OP_CMPXCHG_WR(TYPE,BITS,OP) \
}

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
    OP_GOMP_CRITICAL(OP,GOMP_FLAG)  /* send assignment */ \
    OP_CRITICAL(OP,LCK_ID)          /* send assignment */ \
}
// -------------------------------------------------------------------------

ATOMIC_XCHG_WR( fixed1, wr, kmp_int8,   8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
// On IA-32 there is no 8-byte exchange instruction available to 32-bit code,
// so fall back to the compare-exchange loop there.
#if ( KMP_ARCH_X86 )
    ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
    ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 )    // __kmpc_atomic_fixed8_wr
#endif

ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
    ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 )    // __kmpc_atomic_float8_wr
#else
    ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif

ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 )   // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 )   // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4,  wr, kmp_cmplx32, =,  8c, 1 )   // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8,  wr, kmp_cmplx64, =, 16c, 1 )   // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 )   // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 )   // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
    // 16-byte-aligned flavours (IA-32 only)
    ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t,         =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
    ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif

// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------

// Beginning of a definition (provides name, parameters, debug trace)
//     TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
//     OP_ID   - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Prologue shared by all atomic-capture entry points: emits the function
// signature, serial-init assertion, and trace call.  `flag` selects whether
// the caller wants the value captured after (flag != 0) or before (flag == 0)
// the atomic update.
#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP     - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// `new_value` is expected to be declared by the enclosing macro that expands
// this one; the update and the capture both happen while the lock is held.
#define OP_CRITICAL_CPT(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if( flag ) { \
        (*lhs) OP rhs; \
        new_value = (*lhs); \
    } else { \
        new_value = (*lhs); \
        (*lhs) OP rhs; \
    } \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value;

// ------------------------------------------------------------------------
// GOMP-compatibility path: when linked against GNU OpenMP code and
// __kmp_atomic_mode == 2, fall back to the single global critical section
// (lock id 0) instead of the lock-free path below.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT( OP##=, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// Classic CAS retry loop: recompute old/new from a fresh read of *lhs until
// the compare-and-store succeeds, then return new (flag) or old (!flag).
#define OP_CMPXCHG_CPT(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        TYPE old_value, new_value; \
        temp_val = *lhs; \
        old_value = temp_val; \
        new_value = old_value OP rhs; \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
        { \
            KMP_CPU_PAUSE(); \
            \
            temp_val = *lhs; \
            old_value = temp_val; \
            new_value = old_value OP rhs; \
        } \
        if( flag ) { \
            return new_value; \
        } else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// Capture entry point built on the CAS loop above (integer/real scalar ops).
#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE new_value; \
        OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
        OP_CMPXCHG_CPT(TYPE,BITS,OP) \
    }

// -------------------------------------------------------------------------
// Fixed-size add/sub capture via hardware fetch-and-add; subtraction is
// encoded by passing OP = '-' so the added operand becomes (-rhs).
#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE old_value, new_value; \
        OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
        /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
        old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
        if( flag ) { \
            return old_value OP rhs; \
        } else \
            return old_value; \
    }
// -------------------------------------------------------------------------

ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32,  32, +, 0            )  // __kmpc_atomic_fixed4_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32,  32, -, 0            )  // __kmpc_atomic_fixed4_sub_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64,  64, +, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64,  64, -, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_sub_cpt

ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 )  // __kmpc_atomic_float4_add_cpt
ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 )  // __kmpc_atomic_float4_sub_cpt
ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 )  // __kmpc_atomic_float8_add_cpt
ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 )  // __kmpc_atomic_float8_sub_cpt
//
------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed2_div_cpt ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for C/C++ Reduction operators && and || // ------------------------------------------------------------------------ // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_L_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ new_value OP rhs; \ } else \ new_value = (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define 
OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_L_CPT( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment #define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt // ------------------------------------------------------------------------- // Routines for Fortran operators that matched no one in C: // MAX, MIN, .EQV., .NEQV. // Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt // Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt // ------------------------------------------------------------------------- // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? 
// Lock-based MIN/MAX capture body: re-checks the condition under the lock
// (a racing thread may already have installed a better value), then swaps in
// rhs and captures either rhs (flag) or the displaced value (!flag).
// `old_value`/`new_value` are declared by the enclosing macro.
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    if ( *lhs OP rhs ) { /* still need actions? */ \
        old_value = *lhs; \
        *lhs = rhs; \
        if ( flag ) \
            new_value = rhs; \
        else \
            new_value = old_value; \
    } \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    return new_value; \

// -------------------------------------------------------------------------
// GOMP-compatibility fallback to the global lock (id 0) for MIN/MAX capture.
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
    if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
        KMP_CHECK_GTID; \
        MIN_MAX_CRITSECT_CPT( OP, 0 ); \
    }
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// Lock-free MIN/MAX capture: CAS the bit pattern of rhs into *lhs, retrying
// while the freshly re-read old value still loses to rhs; exits early (falls
// out of the while) as soon as another thread installs a winning value.
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
    { \
        TYPE KMP_ATOMIC_VOLATILE temp_val; \
        /*TYPE old_value; */ \
        temp_val = *lhs; \
        old_value = temp_val; \
        while ( old_value OP rhs && /* still need actions? */ \
            ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value, \
                      *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
        { \
            KMP_CPU_PAUSE(); \
            temp_val = *lhs; \
            old_value = temp_val; \
        } \
        if( flag ) \
            return rhs; \
        else \
            return old_value; \
    }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// NOTE(review): the initial `*lhs OP rhs` test and the `return *lhs;` on the
// no-update path read *lhs outside any synchronization — presumably accepted
// as benign for min/max semantics; verify against the runtime's memory model.
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE new_value, old_value; \
        if ( *lhs OP rhs ) { /* need actions? */ \
            GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
            MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
        } \
        return *lhs; \
    }

// Same shape as above but dispatching to the lock-free CAS body.
#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE new_value, old_value; \
        if ( *lhs OP rhs ) { \
            GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
            MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
        } \
        return *lhs; \
    }

MIN_MAX_COMPXCHG_CPT( fixed1,  max_cpt, char,       8,  <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1,  min_cpt, char,       8,  >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2,  max_cpt, short,      16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2,  min_cpt, short,      16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4,  max_cpt, kmp_int32,  32, <, 0 )            // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4,  min_cpt, kmp_int32,  32, >, 0 )            // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8,  max_cpt, kmp_int64,  64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8,  min_cpt, kmp_int64,  64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4,  max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4,  min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8,  max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8,  min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY,    <, 16r, 1 )       // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY,    >, 16r, 1 )       // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 )       // __kmpc_atomic_float16_max_a16_cpt
MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 )       // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif

// ------------------------------------------------------------------------
// GOMP fallback for .EQV. capture; the operator passed in already encodes the
// combined "^= ~" assignment, so it is forwarded to OP_CRITICAL_CPT verbatim.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
        KMP_CHECK_GTID; \
        OP_CRITICAL_CPT( OP, 0 ); \
    }
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// .EQV. capture entry point: lock-free body uses OP (e.g. ^~), GOMP path
// uses the precomposed "^=~" assignment.
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE new_value; \
        OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \
        OP_CMPXCHG_CPT(TYPE,BITS,OP) \
    }
// ------------------------------------------------------------------------

ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8,  8,  ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^,  KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8,  8,  ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
    ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
        TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_WRK( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \ } // The end of workaround for cmplx4 /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt 
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt #endif #endif // routines for complex types // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // 
__kmpc_atomic_cmplx10_sub_cpt ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations. 
// Supported only on IA-32 architecture and Intel(R) 64 // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT_REV(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ /*temp_val = (*lhs);*/\ (*lhs) = (rhs) OP (*lhs); \ new_value = (*lhs); \ } else { \ new_value = (*lhs);\ (*lhs) = (rhs) OP (*lhs); \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_REV( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ } ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed2_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // 
Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev #endif #endif // routines for complex types // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. 
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) = (rhs) OP (*lhs); \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) = (rhs) OP (*lhs); \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ } // The end of workaround for cmplx4 // !!! TODO: check if we need to return void for cmplx4 routines // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( cmplx16, 
sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev #endif #endif // OpenMP 4.0 Capture-write (swap): {v = x; x = expr;} #define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ old_value = (*lhs); \ (*lhs) = rhs; \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return old_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP( 0 ); \ } #else #define GOMP_CRITICAL_SWP(FLAG) #endif /* KMP_GOMP_COMPAT */ #define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define CMPXCHG_SWP(TYPE,BITS) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CMPXCHG_SWP(TYPE,BITS) \ } ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #else ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #endif // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) #define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CRITICAL_SWP(LCK_ID) \ } // ------------------------------------------------------------------------ // !!! TODO: check if we need to return void for cmplx4 routines // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. 
// Workaround for cmplx4 on Win_32e: these swap routines cannot return the
// captured value directly, so it is written through the extra "out" parameter.
// Emits: void __kmpc_atomic_<TYPE_ID>_swp(id, gtid, lhs, rhs, out)
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE)                                                \
void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \
{                                                                                         \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                                \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));

// Critical-section body of the workaround swap: {tmp = *lhs; *lhs = rhs; *out = tmp;}
// under the per-type atomic lock ATOMIC_LOCK<LCK_ID>.
#define CRITICAL_SWP_WRK(LCK_ID)                                   \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );      \
                                                                   \
    tmp = (*lhs);                                                  \
    (*lhs) = (rhs);                                                \
    (*out) = tmp;                                                  \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );      \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP compatibility: when __kmp_atomic_mode == 2 use the single global
// atomic lock (lock id 0) instead of the per-type lock.
#define GOMP_CRITICAL_SWP_WRK(FLAG)                                \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                    \
        KMP_CHECK_GTID;                                            \
        CRITICAL_SWP_WRK( 0 );                                     \
    }
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Full workaround swap routine: GOMP fallback first, then per-type lock path.
#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG)    \
ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE)                                 \
    TYPE tmp;                                                      \
    GOMP_CRITICAL_SWP_WRK(GOMP_FLAG)                               \
    CRITICAL_SWP_WRK(LCK_ID)                                       \
}
// The end of workaround for cmplx4

ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 )              // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 )              // __kmpc_atomic_float16_swp
#endif
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 )            // __kmpc_atomic_cmplx4_swp

//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 )              // __kmpc_atomic_cmplx4_swp

ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 )               // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 )              // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 )              // __kmpc_atomic_cmplx16_swp
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 )           // __kmpc_atomic_float16_a16_swp
    ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 )   // __kmpc_atomic_cmplx16_a16_swp
#endif
#endif

// End of OpenMP 4.0 Capture

#endif //OMP_40_ENABLED

#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64

#undef OP_CRITICAL

/* ------------------------------------------------------------------------ */
/* Generic atomic routines                                                  */
/* ------------------------------------------------------------------------ */

// Generic dispatcher for an atomic update of a 1-byte object: applies the
// user-supplied combiner f(out, old, rhs) via a compare-and-swap retry loop
// when lock-free CAS is usable, otherwise serializes under the 1-byte atomic
// lock (or the single global lock in GOMP-compat mode 2).
void
__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE                                   /* must use lock */
#else
        TRUE
#endif
        )
    {
        kmp_int8 old_value, new_value;

        old_value = *(kmp_int8 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs,
                        *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            // another thread won the race: re-read and recompute before retrying
            old_value = *(kmp_int8 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // All 1-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid );
    }
}

// 2-byte generic atomic update; CAS path additionally requires 2-byte
// alignment on architectures where misaligned CAS is not safe.
void
__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE                                   /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE                                    /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x1)          /* make sure address is 2-byte aligned */
#endif
        )
    {
        kmp_int16 old_value, new_value;

        old_value = *(kmp_int16 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs,
                        *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            old_value = *(kmp_int16 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // All 2-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid );
    }
}

// 4-byte generic atomic update. Note the FIXME below: this CAS path is also
// taken for 4-byte floats on x86, which breaks GOMP compatibility there.
void
__kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if (
        //
        // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints.
        // Gomp compatibility is broken if this routine is called for floats.
        //
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE                                    /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x3)          /* make sure address is 4-byte aligned */
#endif
        )
    {
        kmp_int32 old_value, new_value;

        old_value = *(kmp_int32 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs,
                        *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            old_value = *(kmp_int32 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // Use __kmp_atomic_lock_4i for all 4-byte data,
        // even if it isn't of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid );
    }
}

// 8-byte generic atomic update; lock-free only with 8-byte alignment (and
// never in x86 GOMP-compat builds, where the lock must be used).
void
__kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE                                   /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE                                    /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x7)          /* make sure address is 8-byte aligned */
#endif
        )
    {
        kmp_int64 old_value, new_value;

        old_value = *(kmp_int64 *) lhs;
        (*f)( &new_value, &old_value, rhs );

        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs,
                        *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) )
        {
            KMP_CPU_PAUSE();

            old_value = *(kmp_int64 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }

        return;
    }
    else {
        //
        // Use __kmp_atomic_lock_8i for all 8-byte data,
        // even if it isn't of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid );

        (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid );
    }
}

// 10-byte (extended-precision real) generic atomic update: always
// lock-based, using the 10r lock.
void
__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid );
}

// 16-byte generic atomic update: always lock-based, using the 16c lock.
void
__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid );
}

// 20-byte (extended-precision complex) generic atomic update: always
// lock-based, using the 20c lock.
void
__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid );
}

// 32-byte (quad-precision complex) generic atomic update: always
// lock-based, using the 32c lock.
void
__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid );

    (*f)( lhs, lhs, rhs );

#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
    __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid );
}

// AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler
//     duplicated in order to not use 3-party names in pure Intel code
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
void
__kmpc_atomic_start(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid));
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void
__kmpc_atomic_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/*! @} */

// end of file
GB_unop__exp_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__exp_fc32_fc32)
// op(A') function:  GB (_unop_tran__exp_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = cexpf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: single-precision complex exponential
#define GB_OP(z, x) \
    z = cexpf (x) ;

// casting (no-op here: A and C are both GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = cexpf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = cexpf (aij) elementwise over anz entries.  For a bitmap
// matrix, Ab selects the live entries; dead entries of Cx are left untouched
// (C->b was already copied from A->b by the caller).

GrB_Info GB (_unop_apply__exp_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every entry is live
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cexpf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cexpf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated unary ops and is
// textually included from GB_unop_transpose.c, driven by the macros above.

GrB_Info GB (_unop_tran__exp_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
updater_basemaker-inl.h
/*! * Copyright 2014 by Contributors * \file updater_basemaker-inl.h * \brief implement a common tree constructor * \author Tianqi Chen */ #ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include <rabit/rabit.h> #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "xgboost/base.h" #include "xgboost/json.h" #include "xgboost/tree_updater.h" #include "param.h" #include "constraints.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" #include "../common/threading_utils.h" namespace xgboost { namespace tree { /*! * \brief base tree maker class that defines common operation * needed in tree making */ class BaseMaker: public TreeUpdater { public: void Configure(const Args& args) override { param_.UpdateAllowUnknown(args); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("train_param"), &this->param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["train_param"] = ToJson(param_); } protected: // helper to collect and query feature meta information struct FMetaHelper { public: /*! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) { fminmax_.resize(tree.param.num_feature * 2); std::fill(fminmax_.begin(), fminmax_.end(), -std::numeric_limits<bst_float>::max()); // start accumulating statistics for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) { auto page = batch.GetView(); for (bst_uint fid = 0; fid < batch.Size(); ++fid) { auto c = page[fid]; if (c.size() != 0) { CHECK_LT(fid * 2, fminmax_.size()); fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]); fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]); } } } } /*! 
\brief synchronize the information */ inline void SyncInfo() { rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size()); } // get feature type, 0:empty 1:binary 2:real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax_.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax_[fid * 2]; bst_float b = fminmax_[fid * 2 + 1]; if (a == -std::numeric_limits<bst_float>::max()) return 0; if (-a == b) { return 1; } else { return 2; } } bst_float MaxValue(bst_uint fid) const { return fminmax_[fid *2 + 1]; } void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const { std::vector<bst_feature_t> &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax_.size(); i += 2) { const auto fid = static_cast<bst_uint>(i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } auto n = static_cast<unsigned>(p * findex.size()); std::shuffle(findex.begin(), findex.end(), common::GlobalRandom()); findex.resize(n); // sync the findex if it is subsample std::string s_cache; common::MemoryBufferStream fc(&s_cache); dmlc::Stream& fs = fc; if (rabit::GetRank() == 0) { fs.Write(findex); } rabit::Broadcast(&s_cache, 0); fs.Read(&findex); } private: std::vector<bst_float> fminmax_; }; // ------static helper functions ------ // helper function to get to next level of the tree /*! \brief this is helper function for row based data*/ inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) { const RegTree::Node &n = tree[nid]; bst_uint findex = n.SplitIndex(); for (const auto& ins : inst) { if (findex == ins.index) { if (ins.fvalue < n.SplitCond()) { return n.LeftChild(); } else { return n.RightChild(); } } } return n.DefaultChild(); } // ------class member helpers--------- /*! 
\brief initialize temp data structure */ inline void InitData(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree) { { // setup position position_.resize(gpair.size()); std::fill(position_.begin(), position_.end(), 0); // mark delete for the deleted datas for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i]; } // mark subsample if (param_.subsample < 1.0f) { CHECK_EQ(param_.sampling_method, TrainParam::kUniform) << "Only uniform sampling is supported, " << "gradient-based sampling is only support by GPU Hist."; std::bernoulli_distribution coin_flip(param_.subsample); auto& rnd = common::GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { // expand query qexpand_.reserve(256); qexpand_.clear(); qexpand_.push_back(0); this->UpdateNode2WorkIndex(tree); } this->interaction_constraints_.Configure(param_, fmat.Info().num_col_); } /*! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (int nid : qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } // use new nodes for qexpand qexpand_ = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } // encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /*! 
* \brief this is helper function uses column based data structure, * reset the positions to the lastest one * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { // set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /*! * \brief helper function to set the non-leaf positions to default direction. * This function can be applied multiple times and will get the same result. * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) { // set default direct nodes to default // for leaf nodes that are not fresh, mark then to ~nid, // so that they are ignored in future statistics collection const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); common::ParallelFor(ndata, [&](bst_omp_uint ridx) { const int nid = this->DecodePosition(ridx); if (tree[nid].IsLeaf()) { // mark finish when it is not a fresh leaf if (tree[nid].RightChild() == -1) { position_[ridx] = ~nid; } } else { // push to default branch if (tree[nid].DefaultLeft()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } }); } /*! * \brief this is helper function uses column based data structure, * to CORRECT the positions of non-default directions that WAS set to default * before calling this function. * \param batch The column batch * \param sorted_split_set The set of index that contains split solutions. 
* \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { auto page = batch.GetView(); for (size_t fid = 0; fid < batch.Size(); ++fid) { auto col = page[fid]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const auto ndata = static_cast<bst_omp_uint>(col.size()); common::ParallelFor(ndata, [&](bst_omp_uint j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].IsLeaf()); int pid = tree[nid].Parent(); // go back to parent, correct those who are not default if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) { if (fvalue < tree[pid].SplitCond()) { this->SetEncodePosition(ridx, tree[pid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[pid].RightChild()); } } }); } } } /*! * \brief this is helper function uses column based data structure, * \param nodes the set of nodes that contains the split to be used * \param tree the regression tree structure * \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned>* out_split_set) { std::vector<unsigned>& fsplits = *out_split_set; fsplits.clear(); // step 1, classify the non-default data into right places for (int nid : nodes) { if (!tree[nid].IsLeaf()) { fsplits.push_back(tree[nid].SplitIndex()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*! 
* \brief this is helper function uses column based data structure, * update all positions into nondefault branch, if any, ignore the default branch * \param nodes the set of nodes that contains the split to be used * \param p_fmat feature matrix needed for tree construction * \param tree the regression tree structure */ virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { std::vector<unsigned> fsplits; this->GetSplitSet(nodes, tree, &fsplits); for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) { auto page = batch.GetView(); for (auto fid : fsplits) { auto col = page[fid]; const auto ndata = static_cast<bst_omp_uint>(col.size()); common::ParallelFor(ndata, [&](bst_omp_uint j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); // go back to parent, correct those who are not default if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) { if (fvalue < tree[nid].SplitCond()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } }); } } } /*! 
\brief helper function to get statistics from a tree */ template<typename TStats> inline void GetNodeStats(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector< std::vector<TStats> > *p_thread_temp, std::vector<TStats> *p_node_stats) { std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp; thread_temp.resize(omp_get_max_threads()); p_node_stats->resize(tree.param.num_nodes); dmlc::OMPException exc; #pragma omp parallel { exc.Run([&]() { const int tid = omp_get_thread_num(); thread_temp[tid].resize(tree.param.num_nodes, TStats()); for (unsigned int nid : qexpand_) { thread_temp[tid][nid] = TStats(); } }); } exc.Rethrow(); // setup position const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_); common::ParallelFor(ndata, [&](bst_omp_uint ridx) { const int nid = position_[ridx]; const int tid = omp_get_thread_num(); if (nid >= 0) { thread_temp[tid][nid].Add(gpair[ridx]); } }); // sum the per thread statistics together for (int nid : qexpand_) { TStats &s = (*p_node_stats)[nid]; s = TStats(); for (size_t tid = 0; tid < thread_temp.size(); ++tid) { s.Add(thread_temp[tid][nid]); } } } /*! \brief common helper data structure to build sketch */ struct SketchEntry { /*! \brief total sum of amount to be met */ double sum_total; /*! \brief statistics used in the sketch */ double rmin, wmin; /*! \brief last seen feature value */ bst_float last_fvalue; /*! \brief current size of sketch */ double next_goal; // pointer to the sketch to put things in common::WXQuantileSketch<bst_float, bst_float> *sketch; // initialize the space inline void Init(unsigned max_size) { next_goal = -1.0f; rmin = wmin = 0.0f; sketch->temp.Reserve(max_size + 1); sketch->temp.size = 0; } /*! 
* \brief push a new element to sketch * \param fvalue feature value, comes in sorted ascending order * \param w weight * \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", naxt_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! 
\brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! * \brief map active node to is working index offset in qexpand, * can be -1, which means the node is node actively expanding */ std::vector<int> node2workindex_; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; FeatureInteractionConstraintHost interaction_constraints_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
residual.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Computes res = rhs - A(phi) on the given level, where A is the 7-point
// variable-coefficient Helmholtz operator  a*alpha*phi - b*div(beta*grad(phi)).
// Parallelization is either across boxes or within a box (collaborative
// threading), selected by comparing the box dimension to a compile-time
// threshold; exactly one of the two omp-for if() clauses is active.
void residual(domain_type * domain, int level, int res_id, int phi_id, int rhs_id, double a, double b){
  // exchange the boundary for x in prep for Ax...
  // for 7-point stencil, only needs to be a 1-deep ghost zone & faces only
  exchange_boundary(domain,level,phi_id,1,0,0);

  // now do residual/restriction proper...
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
    CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;

  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    // pencil/plane are the strides (in doubles) between adjacent j and k rows
    int pencil = domain->subdomains[box].levels[level].pencil;
    int  plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int  dim_k = domain->subdomains[box].levels[level].dim.k;
    int  dim_j = domain->subdomains[box].levels[level].dim.j;
    int  dim_i = domain->subdomains[box].levels[level].dim.i;
    double h2inv = 1.0/(domain->h[level]*domain->h[level]);
    // all grid pointers are offset so that index 0 is the first non-ghost point
    double * __restrict__ phi    = domain->subdomains[box].levels[level].grids[  phi_id] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
    double * __restrict__ rhs    = domain->subdomains[box].levels[level].grids[  rhs_id] + ghosts*(1+pencil+plane);
    double * __restrict__ alpha  = domain->subdomains[box].levels[level].grids[__alpha ] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_i = domain->subdomains[box].levels[level].grids[__beta_i] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_j = domain->subdomains[box].levels[level].grids[__beta_j] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_k = domain->subdomains[box].levels[level].grids[__beta_k] + ghosts*(1+pencil+plane);
    double * __restrict__ res    = domain->subdomains[box].levels[level].grids[  res_id] + ghosts*(1+pencil+plane);

    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=0;k<dim_k;k++){
     for(j=0;j<dim_j;j++){
      for(i=0;i<dim_i;i++){
        int ijk = i + j*pencil + k*plane;
        // 7-point variable-coefficient Helmholtz: face-centered betas couple
        // each cell to its +/-1, +/-pencil, +/-plane neighbors
        double helmholtz =  a*alpha[ijk]*phi[ijk]
                           -b*h2inv*(
                              beta_i[ijk+1     ]*( phi[ijk+1     ]-phi[ijk       ] )
                             -beta_i[ijk       ]*( phi[ijk       ]-phi[ijk-1     ] )
                             +beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk       ] )
                             -beta_j[ijk       ]*( phi[ijk       ]-phi[ijk-pencil] )
                             +beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk       ] )
                             -beta_k[ijk       ]*( phi[ijk       ]-phi[ijk-plane ] )
                            );
        res[ijk] = rhs[ijk]-helmholtz;
    }}}
  }
  domain->cycles.residual[level] += (uint64_t)(CycleTime()-_timeStart);
}

#if 1
//------------------------------------------------------------------------------------------------------------------------------
// This version maximizes parallelism by parallelizing over the resultant coarse grid.
// Thus,
//   one parallelizes over the list of 2x2 fine-grid bars,
//   initializes a coarse grid pencil to zero,
//   additively restricts each pencil in the 2x2 fine-grid bar to the coarse grid pencil
//------------------------------------------------------------------------------------------------------------------------------
// Fused kernel: computes the fine-grid residual (rhs - A(phi), same stencil as
// residual()) and simultaneously restricts it onto the coarse level 'level_c'
// as the average of the 8 fine cells per coarse cell (hence the 0.125 weight).
void residual_and_restriction(domain_type *domain, int level_f, int phi_id, int rhs_id, int level_c, int res_id, double a, double b){
  // exchange the boundary for x in prep for Ax...
  // for 7-point stencil, only needs to be a 1-deep ghost zone & faces only
  exchange_boundary(domain,level_f,phi_id,1,0,0);

  // now do residual/restriction proper...
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int omp_across_boxes = (domain->subdomains[0].levels[level_f].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level_f].dim.i >= CollaborativeThreadingBoxSize);
  int box;

  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int kk,jj;
    // coarse-level geometry for this box...
    int pencil_c = domain->subdomains[box].levels[level_c].pencil;
    int  plane_c = domain->subdomains[box].levels[level_c].plane;
    int ghosts_c = domain->subdomains[box].levels[level_c].ghosts;
    int  dim_k_c = domain->subdomains[box].levels[level_c].dim.k;
    int  dim_j_c = domain->subdomains[box].levels[level_c].dim.j;
    int  dim_i_c = domain->subdomains[box].levels[level_c].dim.i;
    // ...and fine-level geometry (dim_*_f is presumably 2*dim_*_c -- restriction is 2:1)
    int pencil_f = domain->subdomains[box].levels[level_f].pencil;
    int  plane_f = domain->subdomains[box].levels[level_f].plane;
    int ghosts_f = domain->subdomains[box].levels[level_f].ghosts;
    int  dim_k_f = domain->subdomains[box].levels[level_f].dim.k;
    int  dim_j_f = domain->subdomains[box].levels[level_f].dim.j;
    int  dim_i_f = domain->subdomains[box].levels[level_f].dim.i;
    double h2inv = 1.0/(domain->h[level_f]*domain->h[level_f]);
    // i.e. [0] = first non ghost zone point
    double * __restrict__ phi    = domain->subdomains[box].levels[level_f].grids[  phi_id] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ rhs    = domain->subdomains[box].levels[level_f].grids[  rhs_id] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ alpha  = domain->subdomains[box].levels[level_f].grids[__alpha ] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_i = domain->subdomains[box].levels[level_f].grids[__beta_i] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_j = domain->subdomains[box].levels[level_f].grids[__beta_j] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_k = domain->subdomains[box].levels[level_f].grids[__beta_k] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ res    = domain->subdomains[box].levels[level_c].grids[  res_id] + ghosts_c*(1+pencil_c+plane_c);

    // each (kk,jj) iteration owns one 2x2 fine-grid bar -> exactly one coarse
    // pencil, so no two threads ever accumulate into the same coarse point
    #pragma omp parallel for private(kk,jj) if(omp_within_a_box) collapse(2)
    for(kk=0;kk<dim_k_f;kk+=2){
    for(jj=0;jj<dim_j_f;jj+=2){
      int i,j,k;
      // zero the destination coarse-grid pencil
      for(i=0;i<dim_i_c;i++){
        int ijk_c = (i) + (jj>>1)*pencil_c + (kk>>1)*plane_c;
        res[ijk_c] = 0.0;
      }
      // additively restrict the four fine pencils of this 2x2 bar
      for(k=kk;k<kk+2;k++){
      for(j=jj;j<jj+2;j++){
      for(i=0;i<dim_i_f;i++){
        int ijk_f = (i   ) + (j   )*pencil_f + (k   )*plane_f;
        int ijk_c = (i>>1) + (j>>1)*pencil_c + (k>>1)*plane_c;
        double helmholtz =  a*alpha[ijk_f]*phi[ijk_f]
                           -b*h2inv*(
                              beta_i[ijk_f+1       ]*( phi[ijk_f+1       ]-phi[ijk_f         ] )
                             -beta_i[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-1       ] )
                             +beta_j[ijk_f+pencil_f]*( phi[ijk_f+pencil_f]-phi[ijk_f         ] )
                             -beta_j[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-pencil_f] )
                             +beta_k[ijk_f+plane_f ]*( phi[ijk_f+plane_f ]-phi[ijk_f         ] )
                             -beta_k[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-plane_f ] )
                            );
        res[ijk_c] += (rhs[ijk_f]-helmholtz)*0.125; // 1/8 = average of the 8 fine cells per coarse cell
      }
    }}}}
  }
  domain->cycles.residual[level_f] += (uint64_t)(CycleTime()-_timeStart);
}
#else
//------------------------------------------------------------------------------------------------------------------------------
// This version performs a 1D
// parallelization over the coarse-grid k-dimension (every two fine-grid planes)
// It first zeros the coarse grid plane, then increments with restrictions from the fine grid
//------------------------------------------------------------------------------------------------------------------------------
// Alternative (compiled out by the #if 1 above) fused residual+restriction:
// same arithmetic as the enabled version, but inner parallelism is 1D over kk.
void residual_and_restriction(domain_type *domain, int level_f, int phi_id, int rhs_id, int level_c, int res_id, double a, double b){
  // exchange the boundary for x in prep for Ax...
  // for 7-point stencil, only needs to be a 1-deep ghost zone & faces only
  exchange_boundary(domain,level_f,phi_id,1,0,0);

  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int omp_across_boxes = (domain->subdomains[0].levels[level_f].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level_f].dim.i >= CollaborativeThreadingBoxSize);
  int box;

  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    // coarse- and fine-level geometry for this box
    int pencil_c = domain->subdomains[box].levels[level_c].pencil;
    int  plane_c = domain->subdomains[box].levels[level_c].plane;
    int ghosts_c = domain->subdomains[box].levels[level_c].ghosts;
    int  dim_k_c = domain->subdomains[box].levels[level_c].dim.k;
    int  dim_j_c = domain->subdomains[box].levels[level_c].dim.j;
    int  dim_i_c = domain->subdomains[box].levels[level_c].dim.i;
    int pencil_f = domain->subdomains[box].levels[level_f].pencil;
    int  plane_f = domain->subdomains[box].levels[level_f].plane;
    int ghosts_f = domain->subdomains[box].levels[level_f].ghosts;
    int  dim_k_f = domain->subdomains[box].levels[level_f].dim.k;
    int  dim_j_f = domain->subdomains[box].levels[level_f].dim.j;
    int  dim_i_f = domain->subdomains[box].levels[level_f].dim.i;
    double h2inv = 1.0/(domain->h[level_f]*domain->h[level_f]);
    // i.e. [0] = first non ghost zone point
    double * __restrict__ phi    = domain->subdomains[box].levels[level_f].grids[  phi_id] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ rhs    = domain->subdomains[box].levels[level_f].grids[  rhs_id] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ alpha  = domain->subdomains[box].levels[level_f].grids[__alpha ] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_i = domain->subdomains[box].levels[level_f].grids[__beta_i] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_j = domain->subdomains[box].levels[level_f].grids[__beta_j] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ beta_k = domain->subdomains[box].levels[level_f].grids[__beta_k] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ res    = domain->subdomains[box].levels[level_c].grids[  res_id] + ghosts_c*(1+pencil_c+plane_c);

    int kk;
    // one thread per pair of fine planes -> one coarse plane each (no write races)
    #pragma omp parallel for private(kk) if(omp_within_a_box)
    for(kk=0;kk<dim_k_f;kk+=2){
      int i,j,k;
      // zero out the next coarse grid plane
      for(j=0;j<dim_j_c;j++){
      for(i=0;i<dim_i_c;i++){
        int ijk_c = (i) + (j)*pencil_c + (kk>>1)*plane_c;
        res[ijk_c] = 0.0;
      }}
      // restrict two fine grid planes into one coarse grid plane
      for(k=kk;k<kk+2;k++){
      for(j=0;j<dim_j_f;j++){
      for(i=0;i<dim_i_f;i++){
        int ijk_f = (i   ) + (j   )*pencil_f + (k   )*plane_f;
        int ijk_c = (i>>1) + (j>>1)*pencil_c + (k>>1)*plane_c;
        double helmholtz =  a*alpha[ijk_f]*phi[ijk_f]
                           -b*h2inv*(
                              beta_i[ijk_f+1       ]*( phi[ijk_f+1       ]-phi[ijk_f         ] )
                             -beta_i[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-1       ] )
                             +beta_j[ijk_f+pencil_f]*( phi[ijk_f+pencil_f]-phi[ijk_f         ] )
                             -beta_j[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-pencil_f] )
                             +beta_k[ijk_f+plane_f ]*( phi[ijk_f+plane_f ]-phi[ijk_f         ] )
                             -beta_k[ijk_f         ]*( phi[ijk_f         ]-phi[ijk_f-plane_f ] )
                            );
        res[ijk_c] += (rhs[ijk_f]-helmholtz)*0.125; // 1/8 average, accumulated over 8 fine cells
      }}}
    }
  }
  domain->cycles.residual[level_f] += (uint64_t)(CycleTime()-_timeStart);
}
#endif
//------------------------------------------------------------------------------------------------------------------------------
denseOptimizedBlocksJacobi.h
//
// Created by mbarb on 23/02/2018.
//

#ifndef PARALLELITERATIVE_DENSEOPTIMIZEDBLOCKJACOBI_H
#define PARALLELITERATIVE_DENSEOPTIMIZEDBLOCKJACOBI_H

#include "Eigen"
#include "utils.h"
#include "denseParallelJacobi.h"


namespace Iterative {

    // Block-Jacobi solver for dense systems: the diagonal of A is split into
    // square blocks whose inverses are precomputed once; blocks whose update
    // falls below the tolerance are removed from subsequent sweeps
    // ("optimized"), so later iterations do progressively less work.
    template <typename Scalar, long long SIZE>
    class denseOptimizedBlocksJacobi : public denseParallelJacobi<Scalar, SIZE> {

    public:
        /**
        *
        * @param A linear system matrix
        * @param b known term vector
        * @param iterations max number of iterations
        * @param tolerance min error tolerated
        * @param workers number of threads
        * @param blockSize size of the block
        */
        explicit denseOptimizedBlocksJacobi(
                const Eigen::Matrix<Scalar, SIZE, SIZE>& A,
                const Eigen::ColumnVector<Scalar, SIZE>& b,
                const ulonglong iterations,
                const Scalar tolerance,
                const ulong workers = 0L,
                const ulonglong blockSize = 0L) :
                denseParallelJacobi<Scalar, SIZE>::denseParallelJacobi(A, b, iterations, tolerance, workers) {

            this->blockSize = blockSize;
            // NOTE(review): if both blockSize and workers are 0 (the defaults)
            // this divides by zero -- confirm callers always pass workers > 0.
            if (blockSize == 0)
                this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong)1L);
            splitter();
        }

        /**
        * Runs block-Jacobi sweeps until every block has converged (all blocks
        * erased) or the iteration budget is exhausted.
        * @return the computed solution vector
        */
        const Eigen::ColumnVector<Scalar, SIZE> solve() {

            Eigen::ColumnVector<Scalar, SIZE> oldSolution(this->solution);
            std::vector<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>> inverses(blocks.size());

            // compute the inverses of the blocks and memorize it
            // NOTE(review): Eigen's block() signature is (startRow, startCol,
            // rows, cols); the arguments here look row/col-swapped.  It is
            // harmless only because splitter() makes startRow==startCol and
            // rows==cols for every diagonal block -- verify if that changes.
            #pragma omp parallel for
            for (int i = 0; i < blocks.size(); ++i) {
                inverses[i] = this->A.block(blocks[i].startCol, blocks[i].startRow,
                                            blocks[i].cols, blocks[i].rows).inverse();
            }

            std::vector<int> index;  // indices of blocks that converged this sweep
            Eigen::ColumnVector<Scalar, Eigen::Dynamic> Ax =
                Eigen::ColumnVector<Scalar, Eigen::Dynamic>::Zero(this->solution.rows(),this->solution.cols());

            for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) {

                Ax = this->A*oldSolution;
                // #pragma omp parallel for
                // for (auto i = 0; i < this->A.rows(); ++i) {
                //     Ax[i]=this->A.row(i)*oldSolution;
                // }

                #pragma omp parallel for schedule(dynamic)
                for (auto i = 0; i < inverses.size(); ++i) {
                    auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
                    // re-add this block's own contribution so (b - Ax + correction)
                    // excludes only the off-block part of A*x
                    Eigen::ColumnVector<Scalar,Eigen::Dynamic> correction =
                        Eigen::ColumnVector<Scalar,Eigen::Dynamic>::Zero(oldSolution.rows(), oldSolution.cols());
                    for (auto col = blocks[i].startCol; col < blocks[i].startCol+blocks[i].cols; ++col) {
                        correction+=this->A.col(col)*oldSolution[col];
                    }
                    block = inverses[i] * (this->b - Ax + correction).segment(blocks[i].startCol, blocks[i].cols);
                    if ((zeroBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
                        // critical guards only the emplace_back: 'index' is shared
                        #pragma omp critical
                        index.emplace_back(i);
                    }
                    zeroBlock = block;
                }

                // drop converged blocks (erase from the back so indices stay valid)
                if (!index.empty()) {
                    std::sort(index.rbegin(), index.rend());
                    for (auto i : index) {
                        blocks.erase(blocks.begin() + i);
                        inverses.erase(inverses.begin() + i);
                    }
                    if (inverses.empty()) break;
                    index.clear();
                }
                std::swap(this->solution, oldSolution);
            }
            std::cout << this->iteration << std::endl;
            return this->solution;
        }

    protected:
        ulonglong blockSize;        // rows/cols per diagonal block
        std::vector<Index> blocks;  // remaining (unconverged) diagonal blocks

        // Partitions the diagonal of A into blockSize-sized square blocks
        // (the last one is clipped to the matrix edge).
        void splitter() {
            for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
                blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i),
                                          i, std::min(blockSize, (ulonglong)this->A.rows() - i)));
            }
        }

    private:
    };
}

#endif //PARALLELITERATIVE_DENSEOPTIMIZEDBLOCKJACOBI_H
iRCCE_isend.c
//***************************************************************************************
// Non-blocking send routines.
//***************************************************************************************
//
// Author: Rob F. Van der Wijngaart
//         Intel Corporation
// Date:   008/30/2010
//
//***************************************************************************************
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//    [2010-10-25] added support for non-blocking send/recv operations
//                 - iRCCE_isend(), ..._test(), ..._wait(), ..._push()
//                 - iRCCE_irecv(), ..._test(), ..._wait(), ..._push()
//                 by Carsten Clauss, Chair for Operating Systems,
//                                    RWTH Aachen University
//
//    [2010-11-12] extracted non-blocking code into separate library
//                 by Carsten Scholtes
//
//    [2010-12-09] added cancel functions for non-blocking send/recv requests
//                 by Carsten Clauss
//
//    [2011-11-03] added non-blocking by synchronous send/recv functions:
//                 iRCCE_issend() / iRCCE_isrecv()
//
#ifdef GORY
#error iRCCE _cannot_ be built in GORY mode!
#endif

#include "iRCCE_lib.h"

// platform-specific optimized memcpy for the SCC message-passing buffer
#ifdef __hermit__
#include "rte_memcpy.h"
#define memcpy_scc rte_memcpy
#elif defined COPPERRIDGE || defined SCC
#include "scc_memcpy.h"
#else
#define memcpy_scc memcpy
#endif

// Resumable state machine that drives one send request as far as it can go
// without blocking.  Progress state lives entirely in *request: 'label'
// records which of the three wait points (label1..label3) we parked at last
// time, and the goto on entry resumes there.  Returns iRCCE_SUCCESS when the
// whole message has been handed over, iRCCE_PENDING when the receiver is not
// ready yet.
static int iRCCE_push_send_request(iRCCE_SEND_REQUEST *request) {

  char padline[RCCE_LINE_SIZE]; // copy buffer, used if message not multiple of line size
  int  test;                    // flag for calling iRCCE_test_flag()

  if(request->finished) return(iRCCE_SUCCESS);

  if(request->sync) return iRCCE_push_ssend_request(request);

  // resume at the wait point recorded by a previous PENDING return
  if(request->label == 1) goto label1;
  if(request->label == 2) goto label2;
  if(request->label == 3) goto label3;

  // send data in units of available chunk size of comm buffer
  for (; request->wsize< (request->size / request->chunk) * request->chunk; request->wsize += request->chunk) {
    request->bufptr = request->privbuf + request->wsize;
    request->nbytes = request->chunk;
    // copy private data to own comm buffer
    iRCCE_put(request->combuf, (t_vcharp) request->bufptr, request->nbytes, RCCE_IAM);
    RCCE_flag_write(request->sent, request->flag_set_value, request->dest);
    // wait for the destination to be ready to receive a message
label1:
    iRCCE_test_flag(*(request->ready), request->flag_set_value, &test);
    if(!test) {
      request->label = 1;
      return(iRCCE_PENDING);
    }
    RCCE_flag_write(request->ready, RCCE_FLAG_UNSET, RCCE_IAM);
  }

  request->remainder = request->size % request->chunk;
  // if nothing is left over, we are done
  if (!request->remainder) {
    request->finished = 1;
    return(iRCCE_SUCCESS);
  }

  // send remainder of data--whole cache lines
  request->bufptr = request->privbuf + (request->size / request->chunk) * request->chunk;
  request->nbytes = request->remainder - request->remainder % RCCE_LINE_SIZE;
  if (request->nbytes) {
    // copy private data to own comm buffer
    iRCCE_put(request->combuf, (t_vcharp)request->bufptr, request->nbytes, RCCE_IAM);
    RCCE_flag_write(request->sent, request->flag_set_value, request->dest);
    // wait for the destination to be ready to receive a message
label2:
    iRCCE_test_flag(*(request->ready), request->flag_set_value, &test);
    if(!test) {
      request->label = 2;
      return(iRCCE_PENDING);
    }
    RCCE_flag_write(request->ready, RCCE_FLAG_UNSET, RCCE_IAM);
  }

  request->remainder = request->size % request->chunk;
  request->remainder = request->remainder%RCCE_LINE_SIZE;
  // if nothing is left over, we are done
  if (!request->remainder) {
    request->finished = 1;
    return(iRCCE_SUCCESS);
  }

  // remainder is less than a cache line. This must be copied into appropriately sized
  // intermediate space before it can be sent to the receiver
  request->bufptr = request->privbuf + (request->size / request->chunk) * request->chunk + request->nbytes;
  request->nbytes = RCCE_LINE_SIZE;
  // copy private data to own comm buffer
  memcpy_scc(padline,request->bufptr,request->remainder);
  iRCCE_put(request->combuf, (t_vcharp)padline, request->nbytes, RCCE_IAM);
  RCCE_flag_write(request->sent, request->flag_set_value, request->dest);
  // wait for the destination to be ready to receive a message
label3:
  iRCCE_test_flag(*(request->ready), request->flag_set_value, &test);
  if(!test) {
    request->label = 3;
    return(iRCCE_PENDING);
  }
  RCCE_flag_write(request->ready, RCCE_FLAG_UNSET, RCCE_IAM);

  request->finished = 1;
  return(iRCCE_SUCCESS);
}

// Fills in a fresh iRCCE_SEND_REQUEST; no communication happens here.
static void iRCCE_init_send_request(
  char *privbuf,    // source buffer in local private memory (send buffer)
  t_vcharp combuf,  // intermediate buffer in MPB
  size_t chunk,     // size of MPB available for this message (bytes)
  RCCE_FLAG *ready, // flag indicating whether receiver is ready
  RCCE_FLAG *sent,  // flag indicating whether message has been sent by source
  size_t size,      // size of message (bytes)
  int dest,         // UE that will receive the message
  int sync,         // flag indicating whether send is synchronous or not
  iRCCE_SEND_REQUEST *request
) {
  request->privbuf   = privbuf;
  request->combuf    = combuf;
  request->chunk     = chunk;
  request->ready     = ready;
  request->sent      = sent;
  request->size      = size;
  request->dest      = dest;
  request->sync      = sync;

  // split the MPB chunk in two line-aligned halves (used by the sync path)
  request->subchunk1 = ( (chunk / 2) / RCCE_LINE_SIZE ) * RCCE_LINE_SIZE;
  request->subchunk2 = chunk - request->subchunk1;

  request->wsize     = 0;
  request->remainder = 0;
  request->nbytes    = 0;
  request->bufptr    = NULL;

  request->label     = 0;  // state-machine entry point (see iRCCE_push_send_request)
  request->finished  = 0;
  request->next      = NULL;

#ifndef _iRCCE_ANY_LENGTH_
  request->flag_set_value = RCCE_FLAG_SET;
#else
  // with "any length" support the flag carries the message size itself
  request->flag_set_value = (RCCE_FLAG_STATUS)size;
#endif

  return;
}

//--------------------------------------------------------------------------------------
// FUNCTION: iRCCE_isend
//--------------------------------------------------------------------------------------
// non-blocking send function; returns a handle of type iRCCE_SEND_REQUEST
//--------------------------------------------------------------------------------------
// fallback request used when the caller passes request==NULL (blocking semantics)
static iRCCE_SEND_REQUEST blocking_isend_request;
#ifdef _OPENMP
#pragma omp threadprivate (blocking_isend_request)
#endif

// Common body of iRCCE_isend()/iRCCE_issend(): validates arguments, initializes
// the request, and either completes it immediately, enqueues it on the global
// iRCCE_isend_queue, or (NULL request) blocks until completion.
inline static int iRCCE_isend_generic(char *privbuf, ssize_t size, int dest, iRCCE_SEND_REQUEST *request, int sync) {
  if(request == NULL) request = &blocking_isend_request;

  if(size == 0) {
    if(sync) {
      // just synchronize: send a dummy 1-byte payload
      size = 1;
      privbuf = (char*)&size;
    }
    else
      size = -1;  // zero-byte async send: nothing to transfer
  }

  if(size < 0) {
    // nothing to send; mark the request finished right away
    iRCCE_init_send_request(privbuf, RCCE_buff_ptr, RCCE_chunk, &RCCE_ready_flag[dest],
                            &RCCE_sent_flag[RCCE_IAM], size, dest, sync, request);
    request->finished = 1;
    return(iRCCE_SUCCESS);
  }

  if (dest<0 || dest >= RCCE_NP)
    return(RCCE_error_return(RCCE_debug_comm,RCCE_ERROR_ID));
  else {
    iRCCE_init_send_request(privbuf, RCCE_buff_ptr, RCCE_chunk, &RCCE_ready_flag[dest],
                            &RCCE_sent_flag[RCCE_IAM], size, dest, sync, request);

    if(iRCCE_isend_queue == NULL) {
      // queue empty: try to make progress immediately
      if(iRCCE_push_send_request(request) == iRCCE_SUCCESS) {
        return(iRCCE_SUCCESS);
      }
      else {
        iRCCE_isend_queue = request;

        if(request == &blocking_isend_request) {
          iRCCE_isend_wait(request);
          return(iRCCE_SUCCESS);
        }

        return(iRCCE_PENDING);
      }
    }
    else {
      // append to the tail of the send queue
      if(iRCCE_isend_queue->next == NULL) {
        iRCCE_isend_queue->next = request;
      }
      else {
        iRCCE_SEND_REQUEST *run = iRCCE_isend_queue;
        while(run->next != NULL) run = run->next;
        run->next = request;
      }

      if(request == &blocking_isend_request) {
        iRCCE_isend_wait(request);
        return(iRCCE_SUCCESS);
      }

      return(iRCCE_RESERVED);  // queued behind other pending sends
    }
  }
}

// Public non-blocking send (asynchronous).
int iRCCE_isend(char *privbuf, ssize_t size, int dest, iRCCE_SEND_REQUEST *request) {
  return iRCCE_isend_generic(privbuf, size, dest, request, 0);
}

// Public non-blocking but synchronous send (completes only when matched).
int iRCCE_issend(char *privbuf, ssize_t size, int dest, iRCCE_SEND_REQUEST *request) {
  return iRCCE_isend_generic(privbuf, size, dest, request, 1);
}

//--------------------------------------------------------------------------------------
// FUNCTION: iRCCE_isend_push
//--------------------------------------------------------------------------------------
// progress function for pending requests in the isend queue
//--------------------------------------------------------------------------------------
// Advances only the HEAD of the queue (sends must complete in order).
int iRCCE_isend_push(void) {
  iRCCE_SEND_REQUEST *request = iRCCE_isend_queue;

  if(request == NULL) {
    return(iRCCE_SUCCESS);
  }

  if(request->finished) {
    return(iRCCE_SUCCESS);
  }

  iRCCE_push_send_request(request);

  if(request->finished) {
    // head done: pop it so the next request becomes eligible
    iRCCE_isend_queue = request->next;
    return(iRCCE_SUCCESS);
  }

  return(iRCCE_PENDING);
}

//--------------------------------------------------------------------------------------
// FUNCTION: iRCCE_isend_test
//--------------------------------------------------------------------------------------
// test function for completion of the requestes non-blocking send operation
// Just provide NULL instead of testvar if you don't need it
//--------------------------------------------------------------------------------------
int iRCCE_isend_test(iRCCE_SEND_REQUEST *request, int *test) {

  // request == NULL means "test whether the whole queue has drained"
  if(request == NULL) {
    iRCCE_isend_push();
    if(iRCCE_isend_queue == NULL) {
      if (test) (*test) = 1;
      return(iRCCE_SUCCESS);
    }
    else {
      if (test) (*test) = 0;
      return(iRCCE_PENDING);
    }
  }

  if(request->finished) {
    if (test) (*test) = 1;
    return(iRCCE_SUCCESS);
  }

  // not at the head yet: push once and re-check; still queued -> RESERVED
  if(iRCCE_isend_queue != request) {
    iRCCE_isend_push();
    if(iRCCE_isend_queue != request) {
      if (test) (*test) = 0;
      return(iRCCE_RESERVED);
    }
  }

  iRCCE_push_send_request(request);

  if(request->finished) {
    iRCCE_isend_queue = request->next;
    if (test) (*test) = 1;
    return(iRCCE_SUCCESS);
  }

  if (test) (*test) = 0;
  return(iRCCE_PENDING);
}

//--------------------------------------------------------------------------------------
// FUNCTION: iRCCE_isend_wait
//--------------------------------------------------------------------------------------
// just wait for completion of the requestes non-blocking send operation
//--------------------------------------------------------------------------------------
// Busy-waits; also pushes the receive queue to avoid send/recv deadlock.
int iRCCE_isend_wait(iRCCE_SEND_REQUEST *request) {

  if(request != NULL) {
    while(!request->finished) {
      iRCCE_isend_push();
      iRCCE_irecv_push();
    }
  }
  else {
    // NULL request: drain the whole send queue
    while(iRCCE_isend_queue != NULL) {
      iRCCE_isend_push();
      iRCCE_irecv_push();
    }
  }

  return(iRCCE_SUCCESS);
}

//--------------------------------------------------------------------------------------
// FUNCTION: iRCCE_isend_cancel
//--------------------------------------------------------------------------------------
// try to cancel a pending non-blocking send request
//--------------------------------------------------------------------------------------
// Only requests still waiting BEHIND the queue head can be cancelled; the head
// may already have transferred data and therefore reports iRCCE_PENDING.
int iRCCE_isend_cancel(iRCCE_SEND_REQUEST *request, int *test) {
  iRCCE_SEND_REQUEST *run;

  if( (request == NULL) || (request->finished) ) {
    if (test) (*test) = 0;
    return iRCCE_NOT_ENQUEUED;
  }

  if(iRCCE_isend_queue == NULL) {
    if (test) (*test) = 0;
    return iRCCE_NOT_ENQUEUED;
  }

  if(iRCCE_isend_queue == request) {
    if (test) (*test) = 0;
    return iRCCE_PENDING;
  }

  for(run = iRCCE_isend_queue; run->next != NULL; run = run->next) {
    // request found --> remove it from send queue:
    if(run->next == request) {
      run->next = run->next->next;
      if (test) (*test) = 1;
      return iRCCE_SUCCESS;
    }
  }

  if (test) (*test) = 0;
  return iRCCE_NOT_ENQUEUED;
}
pi_omp_profesor.c
//gcc pi_omp_profesor.c -o x -fopenmp && ./x
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>

#define ITERATIONS 2e09
#define THREADS 8
// 8 doubles = 64 bytes: each thread's accumulator gets its own cache line,
// avoiding false sharing between threads
#define PAD 8

// Accumulates this thread's share of the Leibniz series
//   pi = 4/1 - 4/3 + 4/5 - 4/7 + ...
// into piTotal[ID*PAD].  Must be called from inside a parallel region
// (it partitions the work by omp_get_num_threads()).  Interface unchanged.
//
// Fix vs. the original: the loop index was 'int', and for the last thread
// i reaches ~2e9, so the subexpression (i*2)+1 overflowed int -- signed
// overflow is undefined behavior (CERT INT32-C).  We now use 64-bit indices
// and form the denominator in double, which represents 2i+1 exactly here.
int calculatePi(double *piTotal, int ID)
{
    long long iterations = (long long)ITERATIONS;
    long long start = (iterations / omp_get_num_threads()) * ID;
    long long end   = (iterations / omp_get_num_threads()) * (ID + 1);

    long long i = start;
    do {
        // process terms in pairs: + at even i, - at odd i (start is even,
        // so the alternation matches the series regardless of ID)
        piTotal[ID * PAD] += 4.0 / (2.0 * (double)i + 1.0);
        i++;
        piTotal[ID * PAD] -= 4.0 / (2.0 * (double)i + 1.0);
        i++;
    } while (i < end);

    return 0;
}

int main()
{
    int i, threads = THREADS;
    double pi[threads * PAD];  // one padded accumulator slot per thread
    struct timeval tval_before, tval_after, tval_result;

    gettimeofday(&tval_before, NULL);

    // zero exactly the slots the threads will use
    for (i = 0; i < THREADS; i++)
        pi[i * PAD] = 0;

    #pragma omp parallel num_threads(threads)
    {
        int ID = omp_get_thread_num();
        calculatePi(pi, ID);
    }

    // serial reduction of the per-thread partial sums into pi[0]
    for (i = 1; i < THREADS; i++) {
        pi[0] = pi[0] + pi[i * PAD];
    }

    gettimeofday(&tval_after, NULL);
    timersub(&tval_after, &tval_before, &tval_result);
    printf("Time elapsed: %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
    printf("pi: %2.10f \n", pi[0]);
}
calculate_G.h
#pragma omp target teams distribute parallel for collapse(2) thread_limit(BLOCK_SIZE) for (int col = 1; col < NUM+1; col++) { for (int row = 1; row < NUM+1; row++) { if (row == NUM) { // top and bottom boundaries G(col, 0) = v(col, 0); G(col, NUM) = v(col, NUM); } else { // u velocities Real u_ij = u(col, row); Real u_ijp1 = u(col, row + 1); Real u_im1j = u(col - 1, row); Real u_im1jp1 = u(col - 1, row + 1); // v velocities Real v_ij = v(col, row); Real v_ijp1 = v(col, row + 1); Real v_ip1j = v(col + 1, row); Real v_ijm1 = v(col, row - 1); Real v_im1j = v(col - 1, row); // finite differences Real dv2dy, duvdx, d2vdx2, d2vdy2; dv2dy = ((v_ij + v_ijp1) * (v_ij + v_ijp1) - (v_ijm1 + v_ij) * (v_ijm1 + v_ij) + mix_param * (fabs(v_ij + v_ijp1) * (v_ij - v_ijp1) - fabs(v_ijm1 + v_ij) * (v_ijm1 - v_ij))) / (FOUR * dy); duvdx = ((u_ij + u_ijp1) * (v_ij + v_ip1j) - (u_im1j + u_im1jp1) * (v_im1j + v_ij) + mix_param * (fabs(u_ij + u_ijp1) * (v_ij - v_ip1j) - fabs(u_im1j + u_im1jp1) * (v_im1j - v_ij))) / (FOUR * dx); d2vdx2 = (v_ip1j - (TWO * v_ij) + v_im1j) / (dx * dx); d2vdy2 = (v_ijp1 - (TWO * v_ij) + v_ijm1) / (dy * dy); G(col, row) = v_ij + dt * (((d2vdx2 + d2vdy2) / Re_num) - dv2dy - duvdx + gy); } // end if } }
GB_binop__lt_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here should instead be made in the code generator
// that emits these kernels; this instantiation is LT (less-than) over int8_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lt_int8
// A.*B function (eWiseMult):       GB_AemultB__lt_int8
// A*D function (colscale):         GB_AxD__lt_int8
// D*A function (rowscale):         GB_DxB__lt_int8
// C+=B function (dense accum):     GB_Cdense_accumB__lt_int8
// C+=b function (dense accum):     GB_Cdense_accumb__lt_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lt_int8
// C=scalar+B                       GB_bind1st__lt_int8
// C=scalar+B'                      GB_bind1st_tran__lt_int8
// C=A+scalar                       GB_bind2nd__lt_int8
// C=A'+scalar                      GB_bind2nd_tran__lt_int8

// C type:   bool
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x < y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LT is not an accumulable op, so this kernel is not generated.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out (#if 0): LT cannot be used as an accumulator
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lt_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out (#if 0): LT cannot be used as an accumulator
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lt_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary
operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
MINDSSCbox.h
void boxfilter(float* input,float* temp1,float* temp2, int hw, // mind_step int m,int n,int o){ int sz=m*n*o; for(int i=0;i<sz;i++){ temp1[i]=input[i];//copy to temp, initialization } ////y for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=1;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[(i-1)+j*m+k*m*n]; //add intensity value but index starting delayed } } } for(int k=0;k<o;k++){ //iterate over all z dimensions for(int j=0;j<n;j++){ // for x dim do (or y dim?) for(int i=0;i<(hw+1);i++){ // [0 to hw] temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]; // -0 at beginning, left of sliding index, copy offset y value? } for(int i=(hw+1);i<(m-hw);i++){ //[hw+1 to len-hw) ('box' can move free without hitting a border) temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; //delta (symmetric y val offset + dy / -dy) } for(int i=(m-hw);i<m;i++){ // [len-hw to len) temp2[i+j*m+k*m*n]=temp1[(m-1)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; //delta (last value of dim (fixed) - negative offseted value } } } ////x for(int k=0;k<o;k++){ for(int j=1;j<n;j++){ for(int i=0;i<m;i++){ temp2[i+j*m+k*m*n]+=temp2[i+(j-1)*m+k*m*n]; //add intensity value but reversed axis (other) } } } for(int k=0;k<o;k++){ for(int i=0;i<m;i++){ for(int j=0;j<(hw+1);j++){ //caution, dimensions were switched temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]; //see above, but for next dim (x) } for(int j=(hw+1);j<(n-hw);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } for(int j=(n-hw);j<n;j++){ temp1[i+j*m+k*m*n]=temp2[i+(n-1)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } } } ////z //add intensity value but last reversed axis z for(int k=1;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[i+j*m+(k-1)*m*n]; } } } // see above, but now for last axis for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int k=0;k<(hw+1);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]; } for(int k=(hw+1);k<(o-hw);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } 
for(int k=(o-hw);k<o;k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(o-1)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } } } } void imshift(float* input,float* output,int dx,int dy,int dz,int m,int n,int o){ // Get values of shifted patch. If coordinate is not within image dimensions return same intensity as original image (will result in zero distance) //shift image with preservation of original image values when shifted area is out of bounds for(int k=0;k<o;k++){ //z for(int j=0;j<n;j++){ //x for(int i=0;i<m;i++){ //y , iterate orig image size if(i+dy>=0 && i+dy<m && j+dx>=0 && j+dx<n && k+dz>=0 && k+dz<o) //this is something like the min(max) construct output[i+j*m+k*m*n]=input[i+dy+(j+dx)*m+(k+dz)*m*n]; //lookup displaced values and return else output[i+j*m+k*m*n]=input[i+j*m+k*m*n]; //lookup value w/o displacement (just copy), sth like 'inversed mirroring ad edged = normal image at egdes' } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ void distances(float* im1,float* d1,int m,int n,int o,int qs,int l){ int sz1=m*n*o; float* w1=new float[sz1]; int len1=6;//not needed float* temp1=new float[sz1]; //img size temp float* temp2=new float[sz1]; //img size temp // int dx[6]={+qs, +qs, -qs, 0, +qs, +0 }; //redifinition // int dy[6]={+qs, -qs, 0, -qs, 0, +qs}; //redefinition // int dz[6]={0, 0, +qs, +qs, +qs, +qs}; //redefiniton int dx[6]={+qs, +qs, +qs, +qs, 0, 0}; //redifinition int dy[6]={+qs, -qs, 0, 0, +qs, +qs}; //redefinition int dz[6]={0, 0, +qs, -qs, +qs, -qs}; //redefiniton //dx, dy, dz could be passed directly to this function from upper call to omit redefinitions // Offset patches in every 6-neighbourhood direction by quanstisation step (radius) imshift(im1,w1,dx[l],dy[l],dz[l],m,n,o); // std::cout<<"\nw1="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<w1[pri]<<" "; // 
} // std::cout<<"\nw2="; // for(int pri=0;pri<sz_pad ;pri++){ // std::cout<<w2[pri]<<" "; // } for(int i=0;i<sz1;i++){ w1[i]=(w1[i]-im1[i])*(w1[i]-im1[i]); //(0-im[i])^2 = squared img dist from intensity val } // for(int k=0;k<o;k++){ // for(int j=0;j<m;j++){ // for(int i=0;i<n;i++){ // int w2_coord = i-dx[l] + (j-dy[l])*n + (k-dz[l])*m*n; // int im_coord = i + j*n + k*m*n; // w2[w2_coord] = (w2[w2_coord] - im1[im_coord]) * (w2[w2_coord] - im1[im_coord]); //(0-im[i])^2 = squared img dist from intensity val // } // } // } // std::cout<<"\nsquared_patch_distance="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<w1[pri]<<" "; // } // for(int pri=0;pri<sz_pad ;pri++){ // std::cout<<w2[pri]<<" "; // } //3 dim box filter = sth. like blur // boxfilter(w1,temp1,temp2,qs,m,n,o); //w1 is input and output for(int i=0;i<sz1;i++){ d1[i+l*sz1]=w1[i]; } delete[] temp1; delete[] temp2; delete[] w1; } //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t* mindq,float* im1, int m,int n,int o, //image dims int qs, float* output_mind_twelve=0, float* output_mind_bare=0){ //mind_step (chain values smaller than quantisation chain) timeval time1,time2; //MIND with self-similarity context //3^3 shift combinations (+qs,0,-qs) (x-dir, y-dir, z-dir) but only adjacent placed to origin (no diagonals) = 6 combinations // int sx[12]={-qs,+0, -qs,+0, +0, +qs,+0, +0, +0, -qs,+0, +0}; //is that MIND-SSC? // int sy[12]={+0, -qs,+0, +qs,+0, +0, +0, +qs,+0, +0, +0, -qs}; // int sz[12]={+0, +0, +0, +0, -qs,+0, -qs,+0, -qs,+0, -qs,+0}; // int sy[12]={ +0, -qs, -qs, +0, +0, +qs, +0, +0, +0, -qs, +0, +0}; //is that MIND-SSC? // int sx[12]={-qs, +0, +0, +qs, +0, +0, +0, +qs, +0, +0, +0, -qs}; // int sz[12]={+0, +0, +0, +0, -qs, +0, -qs, +0, -qs, +0, -qs, +0}; int sy[12]={0, -qs, -qs, 0, -qs, 0, -qs, 0, 0, 0, 0, 0 }; //is that MIND-SSC? 
int sx[12]={-qs, 0, 0, qs, 0, 0, 0, 0, -qs, 0, -qs, 0 }; int sz[12]={0, 0, 0, 0, 0, -qs, 0, +qs, 0, -qs, 0, qs}; int index[12]={0,0,1,1,2,2,3,3,4,4,5,5}; float sigma=0.75;//1.0;//0.75;//1.5; int rho=ceil(sigma*1.5)*2+1; // is unused! int len1=6; //len dx dy dz const int len2=12; //len sx sy sz image_d=12; int d=12; int sz1=m*n*o; pthread_t thread1, thread2, thread3; //============== DISTANCES USING BOXFILTER =================== float* d1=new float[sz1*len1]; //img_size * (dx,dy,dz) (6 neighbourhood) gettimeofday(&time1, NULL); #pragma omp parallel for for(int l=0;l<len1;l++){ //for all dx, dy, dz //l iterator controls the shift package (dx,dy,dz) distances(im1,d1,m,n,o,qs,l); //d1 is returned (stored distances per x,y,z,l dimension, 4-dim) } std::cout<<"\ndistances="; for(int pri=0;pri<m*n*o*6 ;pri++){ std::cout<<d1[pri]<<" "; } gettimeofday(&time2, NULL); float timeMIND1=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); gettimeofday(&time1, NULL); //quantisation table const int val=6; const unsigned long long power=32; #pragma omp parallel for // float mind_twelve[m*n*o*12]; for(int k=0;k<o;k++){ //iterate z //could be moved out of loop unsigned int tablei[6]={0,1,3,7,15,31}; //lookup table float compare[val-1]; for(int i=0;i<val-1;i++){ compare[i]=-log((i+1.5f)/val);//compare is const for every iteration } //could be moved out of loop float mind1[12];//is this correct here? 
-> could be moved before l_iter loop (only needed within l_iter loop) for(int j=0;j<n;j++){//iterate x for(int i=0;i<m;i++){ //iterate y std::cout<<"\nmind1_bare: "; for(int l=0;l<len2;l++){ // iterate (sx,sy,sz) / index[l] package to get 12 mind values int eff_sx = min(max(0, i+sx[l]),m-1); int eff_sy = min(max(0, j+sy[l]),n-1); int eff_sz = min(max(0, k+sz[l]),o-1); // consecutive l's will take same d1 offset (0...6) but different spatial coordinate mind1[l]=d1[eff_sx + eff_sy*m + eff_sz*m*n+index[l]*sz1]; //mind1 is 1-dim, size=12 // -> take same node but calc 2 diffs per node and add? 2*6diffs = 12 // if(i+sx[l]>=0 && i+sx[l]<m && j+sy[l]>=0 && j+sy[l]<n && k+sz[l]>=0 && k+sz[l]<o){ //min(max) construct // mind1[l]=d1[i+sx[l]+(j+sy[l])*m+(k+sz[l])*m*n+index[l]*sz1]; //mind1 is 1-dim, size=12 // //read offseted distance value // } // else{ // mind1[l]=d1[i+j*m+k*m*n+index[l]*sz1]; //(sz1=m*n*o) builds 12-neighbourhood // //read without (sx,sy,sz) ofset but with l=12 layer offset // } } float minval=*min_element(mind1,mind1+len2); //get minimum value of all 12 stored mind1 features float sumnoise=0.0f; for(int l=0;l<len2;l++){ mind1[l]-=minval; //reset minimum value of mind1 to 0 and lower others accordingly std::cout<<mind1[l]<<" "; output_mind_bare[i+j*m+k*m*n+l*sz1] = mind1[l]; sumnoise+=mind1[l]; //accumulate } float noise1=max(sumnoise/(float)len2,1e-6f); //rescale accumulated mind features for(int l=0;l<len2;l++){ mind1[l]/=noise1; } unsigned long long accum=0; unsigned long long tabled1=1; for(int l=0;l<len2;l++){ //iterate over all mind features //mind1[l]=exp(-mind1[l]); int mind1val=0; for(int c=0;c<val-1;c++){ //accumulate over 5 values mind1val+=compare[c]>mind1[l]?1:0; //count if mind feature is smaller than compare const } //int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum+=tablei[mind1val]*tabled1; //*32^l, propably this was done because of casting to long long tabled1*=power; } mindq[i+j*m+k*m*n]=accum; //one mind value for every 
coordinate xyz } } } // std::cout<<"\nmindq="; // for(int pri=0;pri<m*n*o ;pri++){ // std::cout<<mindq[pri]<<" "; // } // std::cout<<"\nmind_twelve="; // for(int pri=0;pri<m*n*o*12 ;pri++){ // std::cout<<output_mind_twelve[pri]<<" "; // } gettimeofday(&time2, NULL); float timeMIND2=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); delete[] d1; } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> mind_ssc_descriptor( torch::Tensor image, torch::Tensor pQuantisation_step) { int o = image.size(0); int n = image.size(1); int m = image.size(2); torch::Tensor image_copy = image.clone(); float* input_image = image_copy.data_ptr<float>(); int* quantisation_step = pQuantisation_step.data_ptr<int>(); uint64_t* output = new uint64_t[m*n*o]; float* output_mind_twelve = new float[m*n*o*12]; float* output_mind_bare = new float[m*n*o*12]; descriptor(output, input_image, o, m, n, *quantisation_step, output_mind_twelve, output_mind_bare); std::vector<uint64_t> output_vect{output, output+m*n*o}; std::vector<float> output_mind_bare_vect{output_mind_bare, output_mind_bare+m*n*o*12}; std::vector<float> output_mind_twelve_vect{output_mind_twelve, output_mind_twelve+m*n*o*12}; auto options = torch::TensorOptions().dtype(torch::kInt64); auto float_options = torch::TensorOptions().dtype(torch::kFloat); return std::tuple< torch::Tensor, torch::Tensor, torch::Tensor>( torch::from_blob(output_vect.data(), {n,m,o}, options).clone(), torch::from_blob(output_mind_twelve_vect.data(), {12,n,m,o}, float_options).clone(), torch::from_blob(output_mind_bare_vect.data(), {12,n,m,o}, float_options).clone() ); }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
1d_array_ptr.c
#include <stdio.h>   /* fix: printf was used without a declaration */
#include <stdlib.h>
#include <omp.h>

/* Fill a heap array inside an OpenMP parallel region and print it.
 * NOTE(review): every thread performs the same writes to the same elements
 * concurrently — technically a data race (the values written are identical);
 * presumably intentional for a race-detection benchmark, so it is left as-is. */
int main() {
  int *arr = malloc(sizeof(int) * 4);
  if (arr == NULL) {  /* fix: malloc result was unchecked */
    return 1;
  }
#pragma omp parallel
  {
    arr[0] = 0;
    arr[1] = 1;
    arr[2] = 2;
    arr[3] = 3;
    printf("[%d, %d, %d, %d]\n", arr[0], arr[1], arr[2], arr[3]);
  }
  free(arr);
  return 0;
}
omp_parallel_default.c
<ompts:test>
<ompts:testdescription>Test which checks the default option of the parallel construct.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel default</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include "omp_testsuite.h"

/* Sums 1..LOOPCOUNT in parallel: each thread accumulates a private partial
 * sum (mysum) over its share of the worksharing loop, then folds it into the
 * shared total inside a critical section.  Returns 1 on success. */
int <ompts:testcode:functionname>omp_parallel_default</ompts:testcode:functionname> (FILE * logFile)
{
    int i;
    int sum = 0;
    int known_sum;
    int mysum;

    /* closed-form reference: Gauss sum of 1..LOOPCOUNT */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 ;
    /* default(shared) makes sum shared; i is still privatized per thread by
     * the omp for worksharing construct below */
#pragma omp parallel <ompts:check>default(shared)</ompts:check> private(mysum)
    {
	mysum = 0;
#pragma omp for
	for (i = 1; i <= LOOPCOUNT; i++)
	{
	    mysum = mysum + i;
	}
	/* serialize the read-modify-write of the shared total */
#pragma omp critical
	{
	    sum = sum + mysum;
	}   /* end of critical */
    }   /* end of parallel */
    if (known_sum != sum) {
	fprintf(logFile, "KNOWN_SUM = %d; SUM = %d\n", known_sum, sum);
    }
    return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unop__identity_uint16_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint16_fc64
// op(A') function: GB_unop_tran__identity_uint16_fc64

// C type:   uint16_t
// A type:   GxB_FC64_t
// cast:     uint16_t cij = GB_cast_to_uint16_t (creal (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: complex -> uint16 via the real part only (imaginary part dropped)
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                \
{                                                        \
    /* aij = Ax [pA] */                                  \
    GxB_FC64_t aij = Ax [pA] ;                           \
    /* Cx [pC] = op (cast (aij)) */                      \
    uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;     \
    Cx [pC] = z ;                                        \
}

// true if operator is the identity op with no typecasting
// (false here: a complex-to-uint16 typecast is always required)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint16_fc64
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense / sparse case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint16_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
threading_utils.h
/*! * Copyright 2015-2019 by Contributors * \file common.h * \brief Threading utilities */ #ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <dmlc/omp.h> #include <algorithm> #include <limits> #include <type_traits> // std::is_signed #include <vector> #include "xgboost/logging.h" #if !defined(_OPENMP) extern "C" { inline int32_t omp_get_thread_limit() __GOMP_NOTHROW { return 1; } // NOLINT } #endif // !defined(_OPENMP) // MSVC doesn't implement the thread limit. #if defined(_OPENMP) && defined(_MSC_VER) extern "C" { inline int32_t omp_get_thread_limit() { return std::numeric_limits<int32_t>::max(); } // NOLINT } #endif // defined(_MSC_VER) namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. 
Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: 
void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template <typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); CHECK_GE(nthreads, 1); dmlc::OMPException exc; #pragma omp parallel num_threads(nthreads) { exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } exc.Rethrow(); } /** * OpenMP schedule */ struct Sched { enum { kAuto, kDynamic, kStatic, kGuided, } sched; size_t chunk{0}; Sched static Auto() { return Sched{kAuto}; } Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; } Sched static Static(size_t n = 0) { return Sched{kStatic, n}; } Sched static Guided() { return Sched{kGuided}; } }; template <typename Index, typename Func> void ParallelFor(Index size, int32_t n_threads, Sched sched, Func fn) { #if defined(_MSC_VER) // msvc doesn't support unsigned integer as openmp index. 
using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>; #else using OmpInd = Index; #endif OmpInd length = static_cast<OmpInd>(size); CHECK_GE(n_threads, 1); dmlc::OMPException exc; switch (sched.sched) { case Sched::kAuto: { #pragma omp parallel for num_threads(n_threads) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } break; } case Sched::kDynamic: { if (sched.chunk == 0) { #pragma omp parallel for num_threads(n_threads) schedule(dynamic) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } else { #pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } break; } case Sched::kStatic: { if (sched.chunk == 0) { #pragma omp parallel for num_threads(n_threads) schedule(static) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } else { #pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } break; } case Sched::kGuided: { #pragma omp parallel for num_threads(n_threads) schedule(guided) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } break; } } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, int32_t n_threads, Func fn) { ParallelFor(size, n_threads, Sched::Static(), fn); } inline int32_t OmpGetThreadLimit() { int32_t limit = omp_get_thread_limit(); CHECK_GE(limit, 1) << "Invalid thread limit for OpenMP."; return limit; } inline int32_t OmpGetNumThreads(int32_t n_threads) { if (n_threads <= 0) { n_threads = std::min(omp_get_num_procs(), omp_get_max_threads()); } n_threads = std::min(n_threads, OmpGetThreadLimit()); n_threads = std::max(n_threads, 1); return n_threads; } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
ccl_cls.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_integration.h> #include "ccl.h" typedef struct{ double l; ccl_cosmology *cosmo; ccl_cl_tracer_collection_t *trc1; ccl_cl_tracer_collection_t *trc2; ccl_f2d_t *psp; int *status; } integ_cl_par; static void get_k_interval(ccl_cosmology *cosmo, ccl_cl_tracer_collection_t *trc1, ccl_cl_tracer_collection_t *trc2, double l, double *lkmin, double *lkmax) { int itr; // Loop through all tracers and find distance bounds double chi_min1 = 1E15; double chi_max1 = -1E15; for (itr=0; itr < trc1->n_tracers; itr++) { if (trc1->ts[itr]->chi_min < chi_min1) chi_min1 = trc1->ts[itr]->chi_min; if (trc1->ts[itr]->chi_max > chi_max1) chi_max1 = trc1->ts[itr]->chi_max; } double chi_min2 = 1E15; double chi_max2 = -1E15; for (itr=0; itr < trc2->n_tracers; itr++) { if (trc2->ts[itr]->chi_min < chi_min2) chi_min2 = trc2->ts[itr]->chi_min; if (trc2->ts[itr]->chi_max > chi_max2) chi_max2 = trc2->ts[itr]->chi_max; } // Find maximum of minima and minimum of maxima // (i.e. edges where the product of both kernels will have support). 
double chi_min = fmax(chi_min1, chi_min2); double chi_max = fmin(chi_max1, chi_max2); if (chi_min <= 0) chi_min = 0.5*(l+0.5)/cosmo->spline_params.K_MAX; // Don't go beyond kmax *lkmax = log(fmin(cosmo->spline_params.K_MAX, 2*(l+0.5)/chi_min)); *lkmin = log(fmax(cosmo->spline_params.K_MIN, (l+0.5)/chi_max)); } static double transfer_limber_single(ccl_cl_tracer_t *tr, double l, double lk, double k, double chi_l, double a_l, ccl_cosmology *cosmo, ccl_f2d_t *psp, int *status) { double dd = 0; // Kernel and transfer evaluated at chi_l double w = ccl_cl_tracer_t_get_kernel(tr, chi_l, status); double t = ccl_cl_tracer_t_get_transfer(tr, lk,a_l, status); double fl = ccl_cl_tracer_t_get_f_ell(tr, l, status); if (tr->der_bessel < 1) { //We don't need l+1 dd = w*t; if (tr->der_bessel == -1) { //If we divide by (chi*k)^2 double lp1h = l+0.5; dd /= (lp1h*lp1h); } } else { // We will need l+1 // Compute chi_{l+1} and a_{l+1} double lp1h = l+0.5; double lp3h = l+1.5; double chi_lp = lp3h/k; double a_lp = ccl_scale_factor_of_chi(cosmo, chi_lp, status); // Compute power spectrum ratio there double pk_ratio = fabs(ccl_f2d_t_eval(psp, lk, a_lp, cosmo, status) / ccl_f2d_t_eval(psp, lk, a_l, cosmo, status)); // Compute kernel and trasfer at chi_{l+1} double w_p = ccl_cl_tracer_t_get_kernel(tr, chi_lp, status); double t_p = ccl_cl_tracer_t_get_transfer(tr, lk,a_lp, status); // sqrt(2l+1/2l+3) double sqell = sqrt(lp1h*pk_ratio/lp3h); if (tr->der_bessel == 1) dd = l*w*t/lp1h-sqell*w_p*t_p; else //we assume der_bessel=2 here to avoid extra if clause dd = sqell*2*w_p*t_p/lp3h - (0.25+2*l)*w*t/(lp1h*lp1h); } return dd*fl; } static double transfer_limber_wrap(double l,double lk, double k, double chi, double a, ccl_cl_tracer_collection_t *trc, ccl_cosmology *cosmo,ccl_f2d_t *psp, int *status) { int itr; double transfer = 0; for (itr=0; itr < trc->n_tracers; itr++) { transfer += transfer_limber_single( trc->ts[itr], l, lk, k, chi, a, cosmo, psp, status); if (*status != 0) return -1; } return 
transfer; } static double cl_integrand(double lk, void *params) { double d1, d2; integ_cl_par *p = (integ_cl_par *)params; double k = exp(lk); double chi = (p->l+0.5)/k; double a = ccl_scale_factor_of_chi(p->cosmo, chi, p->status); d1 = transfer_limber_wrap(p->l, lk, k, chi, a, p->trc1, p->cosmo, p->psp, p->status); if (d1 == 0) return 0; d2 = transfer_limber_wrap(p->l, lk, k, chi, a, p->trc2, p->cosmo, p->psp, p->status); if (d2 == 0) return 0; double pk = ccl_f2d_t_eval(p->psp, lk, a, p->cosmo, p->status); return k*pk*d1*d2; } void ccl_angular_cls_limber(ccl_cosmology *cosmo, ccl_cl_tracer_collection_t *trc1, ccl_cl_tracer_collection_t *trc2, ccl_f2d_t *psp, int nl_out, double *l_out, double *cl_out, int *status) { int local_status; int clastatus, lind; integ_cl_par ipar; gsl_function F; double lkmin, lkmax, l; double result, eresult; int gslstatus; gsl_integration_workspace *w; gsl_integration_cquad_workspace *w_cquad; size_t nevals; // make sure to init core things for safety if (!cosmo->computed_distances) { *status = CCL_ERROR_DISTANCES_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_cls.c: ccl_angular_cl_limber(): distance splines have not been precomputed!"); return; } // Figure out which power spectrum to use ccl_f2d_t *psp_use; if (psp == NULL) { if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_cls.c: ccl_angular_cl_limber(): non-linear power spctrum has not been computed!"); return; } psp_use = cosmo->data.p_nl; } else psp_use = psp; #pragma omp parallel private(lind, clastatus, ipar, lkmin, lkmax, result, \ eresult, gslstatus, w, w_cquad, nevals, l, F, \ local_status) \ shared(cosmo, trc1, trc2, l_out, cl_out, \ nl_out, status, psp_use) \ default(none) { w_cquad = NULL; w = NULL; local_status = *status; if (local_status == 0) { w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION); if (w == NULL) { local_status = CCL_ERROR_MEMORY; } } if (local_status == 0) { // 
Set up integrating function ipar.cosmo = cosmo; ipar.trc1 = trc1; ipar.trc2 = trc2; ipar.psp = psp_use; ipar.status = &clastatus; F.function = &cl_integrand; F.params = &ipar; } #pragma omp for schedule(dynamic) for (lind=0; lind < nl_out; ++lind) { if (local_status == 0) { l = l_out[lind]; clastatus = 0; ipar.l = l; // Get integration limits get_k_interval(cosmo, trc1, trc2, l, &lkmin, &lkmax); // Integrate gslstatus = gsl_integration_qag(&F, lkmin, lkmax, 0, cosmo->gsl_params.INTEGRATION_LIMBER_EPSREL, cosmo->gsl_params.N_ITERATION, cosmo->gsl_params.INTEGRATION_LIMBER_GAUSS_KRONROD_POINTS, w, &result, &eresult); // Test if a round-off error occured in the evaluation of the integral // If so, try another integration function, more robust but potentially slower if (gslstatus == GSL_EROUND) { ccl_raise_gsl_warning( gslstatus, "ccl_cls.c: ccl_angular_cl_limber(): " "Default GSL integration failure, attempting backup method."); if (w_cquad == NULL) { w_cquad = gsl_integration_cquad_workspace_alloc(cosmo->gsl_params.N_ITERATION); if (w_cquad == NULL) { local_status = CCL_ERROR_MEMORY; } } if (local_status == 0) { nevals = 0; gslstatus = gsl_integration_cquad( &F, lkmin, lkmax, 0, cosmo->gsl_params.INTEGRATION_LIMBER_EPSREL, w_cquad, &result, &eresult, &nevals); } } if (gslstatus == GSL_SUCCESS && (*ipar.status == 0) && local_status == 0) { cl_out[lind] = result / (l+0.5); } else { ccl_raise_gsl_warning(gslstatus, "ccl_cls.c: ccl_angular_cl_limber():"); cl_out[lind] = NAN; local_status = CCL_ERROR_INTEG; } } } gsl_integration_workspace_free(w); gsl_integration_cquad_workspace_free(w_cquad); if (local_status) { #pragma omp atomic write *status = local_status; } } if (*status) { ccl_cosmology_set_status_message( cosmo, "ccl_cls.c: ccl_angular_cls_limber(); integration error\n"); } } void ccl_angular_cls_nonlimber(ccl_cosmology *cosmo, ccl_cl_tracer_collection_t *trc1, ccl_cl_tracer_collection_t *trc2, ccl_f2d_t *psp, int nl_out, int *l_out, double *cl_out, int *status) { 
*status = CCL_ERROR_INCONSISTENT; ccl_cosmology_set_status_message( cosmo, "ccl_cls.c: ccl_angular_cls_nonlimber(); non-Limber integrator not implemented yet\n"); }
parallel_mmul.c
#include <stdio.h> #include "include/Matrix.h" int parallelMatrixMul(Matrix a, Matrix b, Matrix *result) { if (a.columnCount != b.rowCount) { printf("Error: wrong matrix dimensions.\n"); return 1; } #pragma omp parallel for collapse(2) for (int i = 0; i < a.rowCount; ++i) { for (int k = 0; k < b.columnCount; ++k) { for (int j = 0; j < a.columnCount; ++j) { addToElementValue(result, i, k, getElementValue(a, i, j) * getElementValue(b, j, k)); } } } return 0; }
a.35.2.c
/* { dg-do compile } */

/* Compile-only test (OpenMP examples A.35.2): demonstrates NON-CONFORMING
   nesting of worksharing regions.  It is expected to compile — the nesting
   violation is a semantic restriction the compiler is not required to
   diagnose — so the defect below is intentional and must not be "fixed". */

void work (int, int);

/* Contains a worksharing (loop) region.  When called from inside the
   worksharing loop of wrong2 below, this loop region is closely nested
   inside another loop region bound to the same parallel region, which the
   OpenMP specification prohibits. */
void work1 (int i, int n)
{
  int j;
/* incorrect nesting of loop regions */
#pragma omp for
  for (j = 0; j < n; j++)
    work (i, j);
}

void wrong2 (int n)
{
#pragma omp parallel default(shared)
  {
    int i;
#pragma omp for
    for (i = 0; i < n; i++)
      work1 (i, n); /* enters another worksharing region: non-conforming */
  }
}
spmv.h
#pragma once #include <thrust/functional.h> #include <cusp/detail/functional.h> //MW: add some OpenMP pragmas namespace cusp { namespace detail { namespace host { ////////////// // COO SpMV // ////////////// template <typename Matrix, typename Vector1, typename Vector2, typename UnaryFunction, typename BinaryFunction1, typename BinaryFunction2> void spmv_coo(const Matrix& A, const Vector1& x, Vector2& y, UnaryFunction initialize, BinaryFunction1 combine, BinaryFunction2 reduce) { typedef typename Matrix::index_type IndexType; typedef typename Vector2::value_type ValueType; for(size_t i = 0; i < A.num_rows; i++) y[i] = initialize(y[i]); for(size_t n = 0; n < A.num_entries; n++) { const IndexType& i = A.row_indices[n]; const IndexType& j = A.column_indices[n]; const ValueType& Aij = A.values[n]; const ValueType& xj = x[j]; y[i] = reduce(y[i], combine(Aij, xj)); } } template <typename Matrix, typename Vector1, typename Vector2> void spmv_coo(const Matrix& A, const Vector1& x, Vector2& y) { typedef typename Vector2::value_type ValueType; spmv_coo(A, x, y, cusp::detail::zero_function<ValueType>(), thrust::multiplies<ValueType>(), thrust::plus<ValueType>()); } ////////////// // CSR SpMV // ////////////// template <typename Matrix, typename Vector1, typename Vector2, typename UnaryFunction, typename BinaryFunction1, typename BinaryFunction2> void spmv_csr(const Matrix& A, const Vector1& x, Vector2& y, UnaryFunction initialize, BinaryFunction1 combine, BinaryFunction2 reduce) { typedef typename Matrix::index_type IndexType; typedef typename Vector2::value_type ValueType; #pragma omp parallel for for(size_t i = 0; i < A.num_rows; i++) { const IndexType& row_start = A.row_offsets[i]; const IndexType& row_end = A.row_offsets[i+1]; ValueType accumulator = initialize(y[i]); for (IndexType jj = row_start; jj < row_end; jj++) { const IndexType& j = A.column_indices[jj]; const ValueType& Aij = A.values[jj]; const ValueType& xj = x[j]; accumulator = reduce(accumulator, combine(Aij, 
xj)); } y[i] = accumulator; } } template <typename Matrix, typename Vector1, typename Vector2> void spmv_csr(const Matrix& A, const Vector1& x, Vector2& y) { typedef typename Vector2::value_type ValueType; spmv_csr(A, x, y, cusp::detail::zero_function<ValueType>(), thrust::multiplies<ValueType>(), thrust::plus<ValueType>()); } ////////////// // DIA SpMV // ////////////// template <typename Matrix, typename Vector1, typename Vector2, typename UnaryFunction, typename BinaryFunction1, typename BinaryFunction2> void spmv_dia(const Matrix& A, const Vector1& x, Vector2& y, UnaryFunction initialize, BinaryFunction1 combine, BinaryFunction2 reduce) { typedef typename Matrix::index_type IndexType; typedef typename Vector2::value_type ValueType; const size_t num_diagonals = A.values.num_cols; for(size_t i = 0; i < A.num_rows; i++) y[i] = initialize(y[i]); for(size_t i = 0; i < num_diagonals; i++) { const IndexType& k = A.diagonal_offsets[i]; const IndexType& i_start = std::max<IndexType>(0, -k); const IndexType& j_start = std::max<IndexType>(0, k); // number of elements to process in this diagonal const IndexType N = std::min(A.num_rows - i_start, A.num_cols - j_start); for(IndexType n = 0; n < N; n++) { const ValueType& Aij = A.values(i_start + n, i); const ValueType& xj = x[j_start + n]; ValueType& yi = y[i_start + n]; yi = reduce(yi, combine(Aij, xj)); } } } template <typename Matrix, typename Vector1, typename Vector2> void spmv_dia(const Matrix& A, const Vector1& x, Vector2& y) { typedef typename Vector2::value_type ValueType; spmv_dia(A, x, y, cusp::detail::zero_function<ValueType>(), thrust::multiplies<ValueType>(), thrust::plus<ValueType>()); } ////////////// // ELL SpMV // ////////////// template <typename Matrix, typename Vector1, typename Vector2, typename UnaryFunction, typename BinaryFunction1, typename BinaryFunction2> void spmv_ell(const Matrix& A, const Vector1& x, Vector2& y, UnaryFunction initialize, BinaryFunction1 combine, BinaryFunction2 reduce) { 
typedef typename Matrix::index_type IndexType; typedef typename Vector2::value_type ValueType; const size_t& num_entries_per_row = A.column_indices.num_cols; const IndexType invalid_index = Matrix::invalid_index; for(size_t i = 0; i < A.num_rows; i++) y[i] = initialize(y[i]); for(size_t n = 0; n < num_entries_per_row; n++) { for(size_t i = 0; i < A.num_rows; i++) { const IndexType& j = A.column_indices(i, n); const ValueType& Aij = A.values(i,n); if (j != invalid_index) { const ValueType& xj = x[j]; y[i] = reduce(y[i], combine(Aij, xj)); } } } } template <typename Matrix, typename Vector1, typename Vector2> void spmv_ell(const Matrix& A, const Vector1& x, Vector2& y) { typedef typename Vector2::value_type ValueType; spmv_ell(A, x, y, cusp::detail::zero_function<ValueType>(), thrust::multiplies<ValueType>(), thrust::plus<ValueType>()); } } // end namespace host } // end namespace detail } // end namespace cusp
Parallel.h
#pragma once #include <ATen/ATen.h> #include <atomic> #include <cstddef> #include <exception> #ifdef _OPENMP #include <omp.h> #endif namespace at { namespace internal { // This parameter is heuristically chosen to determine the minimum number of // work that warrants paralellism. For example, when summing an array, it is // deemed inefficient to parallelise over arrays shorter than 32768. Further, // no parallel algorithm (such as parallel_reduce) should split work into // smaller than GRAIN_SIZE chunks. constexpr int64_t GRAIN_SIZE = 32768; } // namespace internal inline int64_t divup(int64_t x, int64_t y) { return (x + y - 1) / y; } inline int get_max_threads() { #ifdef _OPENMP return omp_get_max_threads(); #else return 1; #endif } inline int get_thread_num() { #ifdef _OPENMP return omp_get_thread_num(); #else return 0; #endif } inline bool in_parallel_region() { #ifdef _OPENMP return omp_in_parallel(); #else return false; #endif } template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { #ifdef _OPENMP std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; #pragma omp parallel if (!omp_in_parallel() && ((end - begin) >= grain_size)) { int64_t num_threads = omp_get_num_threads(); int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) { try { f(begin_tid, std::min(end, chunk_size + begin_tid)); } catch (...) { if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } } if (eptr) { std::rethrow_exception(eptr); } #else if (begin < end) { f(begin, end); } #endif } /* parallel_reduce begin: index at which to start applying reduction end: index at which to stop applying reduction grain_size: number of elements per chunk. impacts number of elements in intermediate results tensor and degree of parallelization. ident: identity for binary combination function sf. 
sf(ident, x) needs to return x. f: function for reduction over a chunk. f needs to be of signature scalar_t f(int64_t partial_begin, int64_t partial_end, scalar_t identifiy) sf: function to combine two partial results. sf needs to be of signature scalar_t sf(scalar_t x, scalar_t y) For example, you might have a tensor of 10000 entires and want to sum together all the elements. Parallel_reduce with a grain_size of 2500 will then allocate an intermediate result tensor with 4 elements. Then it will execute the function "f" you provide and pass the beginning and end index of these chunks, so 0-2499, 2500-4999, etc. and the combination identity. It will then write out the result from each of these chunks into the intermediate result tensor. After that it'll reduce the partial results from each chunk into a single number using the combination function sf and the identity ident. For a total summation this would be "+" and 0 respectively. This is similar to tbb's approach [1], where you need to provide a function to accumulate a subrange, a function to combine two partial results and an identity. [1] https://software.intel.com/en-us/node/506154 */ template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F f, const SF sf) { if (get_num_threads() == 1) { return f(begin, end, ident); } else { const int64_t num_results = divup((end - begin), grain_size); std::vector<scalar_t> results(num_results); scalar_t* results_data = results.data(); #pragma omp parallel for if ((end - begin) >= grain_size) for (int64_t id = 0; id < num_results; id++) { int64_t i = begin + id * grain_size; results_data[id] = f(i, i + std::min(end - i, grain_size), ident); } return std::accumulate( results_data, results_data + results.size(), ident, sf); } } } // namespace at
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(24*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(24*t3+Nx+20,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),256*t4+254),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON 
#pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
conv3x3s1_winograd64_neon4_BdB.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd64_neon4_BdB(const Mat& bottom_blob, Mat& top_blob, const Option& opt, int outch, int inch, int outh, int outw) { // BEGIN transform input int w = bottom_blob.w; //int h = bottom_blob.h; Mat bottom_blob_bordered = bottom_blob; Mat bottom_blob_tm = top_blob; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch, 4u, opt.workspace_allocator); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 
+ r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; // the assembly block for armv7 input transform requires 13 general registers // old gcc may fail to allocate register on debug build without -fomit-frame-pointer // so, fallback to intrinsic version for armv7 debug build --- nihui #if __aarch64__ || !defined(NDEBUG) for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = 
vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], 
_tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = 
vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); 
r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0_0 += img0_tm.w*tiles*2*4; r0_tm0_4 += img0_tm.w*tiles*2*4; r0_tm1_0 += img0_tm.w*tiles*2*4; r0_tm1_4 += img0_tm.w*tiles*2*4; r0_tm2_0 += img0_tm.w*tiles*2*4; r0_tm2_4 += img0_tm.w*tiles*2*4; r0_tm3_0 += img0_tm.w*tiles*2*4; 
r0_tm3_4 += img0_tm.w*tiles*2*4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); int step = img0_tm.w*tiles*2*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + 
tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } } }
dragonfly3_fmt_plug.c
/*
 * This file is part of John the Ripper password cracker,
 * based on rawSHA256_fmt.c code
 *
 * This software is Copyright (c) 2012 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 * The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See
 * http://www.openwall.com/lists/john-dev/2012/01/16/1
 *
 * NOTE(review): two formats are registered below because the buggy upstream
 * code produced different effective salts on 32-bit and 64-bit builds — see
 * get_salt_32() vs get_salt_64(). The odd byte scatter in get_binary() is
 * also a faithful reproduction of the bug; do not "fix" it.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_dragonfly3_32;
extern struct fmt_main fmt_dragonfly3_64;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dragonfly3_32);
john_register_one(&fmt_dragonfly3_64);
#else

#include "sha2.h"

#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4096 // tuned on K8-dual HT
#endif
#include <omp.h>
#endif
#include "memdbg.h"

#define FORMAT_LABEL_32 "dragonfly3-32"
#define FORMAT_LABEL_64 "dragonfly3-64"
#define FORMAT_NAME_32 "DragonFly BSD $3$ w/ bug, 32-bit"
#define FORMAT_NAME_64 "DragonFly BSD $3$ w/ bug, 64-bit"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " " SHA2_LIB

#define FORMAT_TAG "$3$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 44       /* length of the base64 hash part only */

#define BINARY_SIZE 32             /* raw SHA-256 digest size */
#define BINARY_ALIGN 4
#define SALT_SIZE_32 (1+4+8) // 1st char is length
#define SALT_SIZE_64 (1+8+8)
#define SALT_ALIGN 1

#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Test vectors: { ciphertext, plaintext } for the 32-bit flavor of the bug */
static struct fmt_tests tests_32[] = {
	{"$3$z$EBG66iBCGfUfENOfqLUH/r9xQxI1cG373/hRop6j.oWs", "magnum"},
	{"$3$f6daU5$Xf/u8pKp.sb4VCLKz7tTZMUKJ3J4oOfZgUSHYOFL.M0n", ""},
	{"$3$PNPA2tJ$ppD4bXqPMYFVdYVYrxXGMWeYB6Xv8e6jmXbvrB5V.okl", "password"},
	{"$3$jWhDSrS$bad..Dy7UAyabPyfrEi3fgQ2qtT.5fE7C5EMNo/n.Qk5", "John the Ripper"},
	{"$3$SSYEHO$hkuDmUQHT2Tr0.ai.lUVyb9bCC875Up.CZVa6UJZ.Muv", "DragonFly BSD"},
	{"$3$pomO$a2ltqo.LlUSt1DG68sv2FZOdLcul0gYQ3xmn6z0G.I6Y", "123"},
	{"$3$F$8Asqp58WwQ3WDMhaR3yQMSJGdCtpBqckemkCSNnJ.gRr", "12345678"},
	{NULL}
};

/* Test vectors for the 64-bit flavor of the bug */
static struct fmt_tests tests_64[] = {
	{"$3$z$sNV7KLtLxvJRsj2MfBtGZFuzXP3CECITaFq/rvsy.Y.Q", "magnum"},
	{"$3$f6daU5$eV2SX9vUHTMsoy3Ic7cWiQ4mOxyuyenGjYQWkJmy.AF3", ""},
	{"$3$PNPA2tJ$GvXjg6zSge3YDh5I35JlYZHoQS2r0/.vn36fQzSY.A0d", "password"},
	{"$3$jWhDSrS$5yBH7KFPmsg.PhPeDMj1MY4fv9061zdbYumPe2Ve.Y5J", "John the Ripper"},
	{"$3$SSYEHO$AMYLyanRYs8F2U07FsBrSFuOIygJ4kgqvpBB17BI.61N", "DragonFly BSD"},
	{"$3$e$TzMK1ePmjnZI/YbGes/1PAKqbj8aOV31Hf8Tz9es.kkq", "123"},
	{"$3$XcMa$idKoaBQXdRlhfJFDjnV0jDryW/nEBAGXONyzJvnH.cR3", "12345678"},
	{NULL}
};

/* Per-candidate buffers, sized for max_keys_per_crypt keys in init() */
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
	[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];

/* Currently set salt: cur_salt points at the salt bytes, salt_len counts them */
static char *cur_salt;
static int salt_len;

/*
 * Allocate the per-candidate buffers. Under OpenMP the key counts are scaled
 * by thread count (and OMP_SCALE for max) before allocation.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

/* Release everything init() allocated (reverse order). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
}

/*
 * Accept "$3$<salt>$<44 base64 chars>" where the salt is at most 8 chars
 * (may be empty) and the hash part contains only valid base64 digits.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos, *start;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;

	ciphertext += FORMAT_TAG_LEN;

	/* scan the salt field up to the next '$'; cap its length at 8 */
	for (pos = ciphertext; *pos && *pos != '$'; pos++);
	if (!*pos || pos < ciphertext || pos > &ciphertext[8])
		return 0;

	start = ++pos;
	/* the hash part must be exactly CIPHERTEXT_LENGTH base64 digits */
	while (atoi64[ARCH_INDEX(*pos)] != 0x7F)
		pos++;
	if (*pos || pos - start != CIPHERTEXT_LENGTH)
		return 0;

	return 1;
}

/*
 * Decode one 4-char base64 group (24 bits) and scatter its three bytes to
 * output indices b1/b2/b3. The i/i+11/i+21 scatter in get_binary() mirrors
 * the upstream encoding.
 */
#define TO_BINARY(b1, b2, b3) \
	value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	out[b1] = value >> 16; \
	out[b2] = value >> 8; \
	out[b3] = value;

/*
 * Convert the base64 hash part back to the 32 raw digest bytes.
 * The final group only yields two bytes (indices 10 and 31) — this is part
 * of the broken DragonFly encoding, not an oversight here.
 */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_32 outbuf[BINARY_SIZE/4];
	ARCH_WORD_32 value;
	char *pos;
	unsigned char *out = (unsigned char*)outbuf;
	int i;

	pos = strrchr(ciphertext, '$') + 1;

	for (i = 0; i < 10; i++) {
		TO_BINARY(i, i + 11, i + 21);
	}
	value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] |
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) |
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) |
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18);
	out[10] = value >> 16;
	out[31] = value >> 8;

	return (void *)out;
}

/* Hash-table lookups on the first 32 bits of the computed digest */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Store a candidate password, truncating to PLAINTEXT_LENGTH bytes. */
static void set_key(char *key, int index)
{
	int len = strlen(key);

	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

/* Return the stored candidate, NUL-terminated on demand. */
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}

/*
 * Compute SHA-256(password || salt) for every queued candidate.
 * Without OpenMP the block runs once with index 0 (MAX_KEYS_PER_CRYPT is 1).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		SHA256_CTX ctx;

		SHA256_Init(&ctx);

		/* First the password */
		SHA256_Update(&ctx, saved_key[index], saved_len[index]);

		/* Then the salt, including the $3$ magic */
		SHA256_Update(&ctx, cur_salt, salt_len);

		SHA256_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Salt blob layout: first byte is the length, salt bytes follow. */
static void set_salt(void *salt)
{
	salt_len = (int)*(char*)salt;
	cur_salt = (char*)salt + 1;
}

// For 32-bit version of the bug, our magic is "$3$\0" len 4
static void *get_salt_32(char *ciphertext)
{
	static char *out;
	int len;

	if (!out)
		out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD);

	memset(out, 0, SALT_SIZE_32);
	ciphertext += FORMAT_TAG_LEN;
	/* strcpy plants "$3$" plus its NUL terminator -> 4 magic bytes */
	strcpy(&out[1], FORMAT_TAG);
	for (len = 0; ciphertext[len] != '$'; len++);
	memcpy(&out[5], ciphertext, len);
	out[0] = len + 4;

	return out;
}

// For 64-bit version of the bug, our magic is "$3$\0sha5" len 8
static void *get_salt_64(char *ciphertext)
{
	static char *out;
	int len;

	if (!out)
		out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD);

	memset(out, 0, SALT_SIZE_64);
	ciphertext += FORMAT_TAG_LEN;
	/* embedded NUL is intentional; the 64-bit bug hashed these 8 bytes */
	memcpy(&out[1], "$3$\0sha5", 8);
	for (len = 0; ciphertext[len] != '$'; len++);
	memcpy(&out[9], ciphertext, len);
	out[0] = len + 8;

	return out;
}

/*
 * Quick filter: compare only the first ARCH_SIZE bytes of each digest;
 * cmp_one() below does the full BINARY_SIZE comparison.
 * Without OpenMP only index 0 exists, so no loop is compiled in.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full 32-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Binary comparison is exact already; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

// Public domain hash function by DJ Bernstein
/* Hashes the salt bytes (skipping the leading length byte). */
static int salt_hash(void *salt)
{
	unsigned char *s = (unsigned char*)salt + 1;
	unsigned int hash = 5381;
	unsigned int i;

	for (i = 0; i < *(unsigned char*)salt; i++)
		hash = ((hash << 5) + hash) ^ s[i];

	return hash & (SALT_HASH_SIZE - 1);
}

/* Format registration for the 32-bit flavor (positional fmt_main tables) */
struct fmt_main fmt_dragonfly3_32 = {
	{
		FORMAT_LABEL_32,
		FORMAT_NAME_32,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE_32,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ FORMAT_TAG },
		tests_32
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt_32,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

/* Format registration for the 64-bit flavor; differs only in name, salt
   size/extractor and test vectors */
struct fmt_main fmt_dragonfly3_64 = {
	{
		FORMAT_LABEL_64,
		FORMAT_NAME_64,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE_64,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ NULL },
		tests_64
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt_64,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
acado_integrator.c
/* * This file was auto-generated using the ACADO Toolkit. * * While ACADO Toolkit is free software released under the terms of * the GNU Lesser General Public License (LGPL), the generated code * as such remains the property of the user who used ACADO Toolkit * to generate this code. In particular, user dependent data of the code * do not inherit the GNU LGPL license. On the other hand, parts of the * generated code that are a direct copy of source code from the * ACADO Toolkit or the software tools it is based on, remain, as derived * work, automatically covered by the LGPL license. * * ACADO Toolkit is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include "acado_common.h" real_t rk_dim20_swap; /** Column vector of size: 20 */ real_t rk_dim20_bPerm[ 20 ]; real_t rk_ttt; /** Row vector of size: 24 */ real_t rk_xxx[ 24 ]; /** Matrix of size: 10 x 2 (row major format) */ real_t rk_kkk[ 20 ]; /** Matrix of size: 20 x 20 (row major format) */ real_t rk_A[ 400 ]; /** Column vector of size: 20 */ real_t rk_b[ 20 ]; /** Row vector of size: 20 */ int rk_dim20_perm[ 20 ]; /** Column vector of size: 10 */ real_t rk_rhsTemp[ 10 ]; /** Matrix of size: 2 x 140 (row major format) */ real_t rk_diffsTemp2[ 280 ]; /** Matrix of size: 10 x 2 (row major format) */ real_t rk_diffK[ 20 ]; /** Matrix of size: 10 x 14 (row major format) */ real_t rk_diffsNew2[ 140 ]; #pragma omp threadprivate( auxVar, rk_ttt, rk_xxx, rk_kkk, rk_diffK, rk_rhsTemp, rk_dim20_perm, rk_A, rk_b, rk_diffsNew2, rk_diffsTemp2, rk_dim20_swap, rk_dim20_bPerm ) void acado_rhs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 10; /* Compute outputs: */ out[0] = xd[7]; out[1] = xd[8]; out[2] = xd[9]; out[3] = ((real_t)(5.0000000000000000e-01)*(((((real_t)(0.0000000000000000e+00)-u[1])*xd[4])-(u[2]*xd[5]))-(u[3]*xd[6]))); out[4] = 
((real_t)(5.0000000000000000e-01)*(((u[1]*xd[3])+(u[3]*xd[5]))-(u[2]*xd[6]))); out[5] = ((real_t)(5.0000000000000000e-01)*(((u[2]*xd[3])-(u[3]*xd[4]))+(u[1]*xd[6]))); out[6] = ((real_t)(5.0000000000000000e-01)*(((u[3]*xd[3])+(u[2]*xd[4]))-(u[1]*xd[5]))); out[7] = (((real_t)(2.0000000000000000e+00)*((xd[3]*xd[5])+(xd[4]*xd[6])))*u[0]); out[8] = (((real_t)(2.0000000000000000e+00)*((xd[5]*xd[6])-(xd[3]*xd[4])))*u[0]); out[9] = (((((real_t)(1.0000000000000000e+00)-(((real_t)(2.0000000000000000e+00)*xd[4])*xd[4]))-(((real_t)(2.0000000000000000e+00)*xd[5])*xd[5]))*u[0])-(real_t)(9.8065999999999995e+00)); } void acado_diffs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 10; /* Compute outputs: */ out[0] = (real_t)(0.0000000000000000e+00); out[1] = (real_t)(0.0000000000000000e+00); out[2] = (real_t)(0.0000000000000000e+00); out[3] = (real_t)(0.0000000000000000e+00); out[4] = (real_t)(0.0000000000000000e+00); out[5] = (real_t)(0.0000000000000000e+00); out[6] = (real_t)(0.0000000000000000e+00); out[7] = (real_t)(1.0000000000000000e+00); out[8] = (real_t)(0.0000000000000000e+00); out[9] = (real_t)(0.0000000000000000e+00); out[10] = (real_t)(0.0000000000000000e+00); out[11] = (real_t)(0.0000000000000000e+00); out[12] = (real_t)(0.0000000000000000e+00); out[13] = (real_t)(0.0000000000000000e+00); out[14] = (real_t)(0.0000000000000000e+00); out[15] = (real_t)(0.0000000000000000e+00); out[16] = (real_t)(0.0000000000000000e+00); out[17] = (real_t)(0.0000000000000000e+00); out[18] = (real_t)(0.0000000000000000e+00); out[19] = (real_t)(0.0000000000000000e+00); out[20] = (real_t)(0.0000000000000000e+00); out[21] = (real_t)(0.0000000000000000e+00); out[22] = (real_t)(1.0000000000000000e+00); out[23] = (real_t)(0.0000000000000000e+00); out[24] = (real_t)(0.0000000000000000e+00); out[25] = (real_t)(0.0000000000000000e+00); out[26] = (real_t)(0.0000000000000000e+00); out[27] = (real_t)(0.0000000000000000e+00); out[28] = (real_t)(0.0000000000000000e+00); 
out[29] = (real_t)(0.0000000000000000e+00); out[30] = (real_t)(0.0000000000000000e+00); out[31] = (real_t)(0.0000000000000000e+00); out[32] = (real_t)(0.0000000000000000e+00); out[33] = (real_t)(0.0000000000000000e+00); out[34] = (real_t)(0.0000000000000000e+00); out[35] = (real_t)(0.0000000000000000e+00); out[36] = (real_t)(0.0000000000000000e+00); out[37] = (real_t)(1.0000000000000000e+00); out[38] = (real_t)(0.0000000000000000e+00); out[39] = (real_t)(0.0000000000000000e+00); out[40] = (real_t)(0.0000000000000000e+00); out[41] = (real_t)(0.0000000000000000e+00); out[42] = (real_t)(0.0000000000000000e+00); out[43] = (real_t)(0.0000000000000000e+00); out[44] = (real_t)(0.0000000000000000e+00); out[45] = (real_t)(0.0000000000000000e+00); out[46] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[1])); out[47] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[2])); out[48] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[3])); out[49] = (real_t)(0.0000000000000000e+00); out[50] = (real_t)(0.0000000000000000e+00); out[51] = (real_t)(0.0000000000000000e+00); out[52] = (real_t)(0.0000000000000000e+00); out[53] = ((real_t)(5.0000000000000000e-01)*(((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*xd[4])); out[54] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-xd[5])); out[55] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-xd[6])); out[56] = (real_t)(0.0000000000000000e+00); out[57] = (real_t)(0.0000000000000000e+00); out[58] = (real_t)(0.0000000000000000e+00); out[59] = ((real_t)(5.0000000000000000e-01)*u[1]); out[60] = (real_t)(0.0000000000000000e+00); out[61] = ((real_t)(5.0000000000000000e-01)*u[3]); out[62] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[2])); out[63] = (real_t)(0.0000000000000000e+00); out[64] = (real_t)(0.0000000000000000e+00); out[65] = (real_t)(0.0000000000000000e+00); out[66] = 
(real_t)(0.0000000000000000e+00); out[67] = ((real_t)(5.0000000000000000e-01)*xd[3]); out[68] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-xd[6])); out[69] = ((real_t)(5.0000000000000000e-01)*xd[5]); out[70] = (real_t)(0.0000000000000000e+00); out[71] = (real_t)(0.0000000000000000e+00); out[72] = (real_t)(0.0000000000000000e+00); out[73] = ((real_t)(5.0000000000000000e-01)*u[2]); out[74] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[3])); out[75] = (real_t)(0.0000000000000000e+00); out[76] = ((real_t)(5.0000000000000000e-01)*u[1]); out[77] = (real_t)(0.0000000000000000e+00); out[78] = (real_t)(0.0000000000000000e+00); out[79] = (real_t)(0.0000000000000000e+00); out[80] = (real_t)(0.0000000000000000e+00); out[81] = ((real_t)(5.0000000000000000e-01)*xd[6]); out[82] = ((real_t)(5.0000000000000000e-01)*xd[3]); out[83] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-xd[4])); out[84] = (real_t)(0.0000000000000000e+00); out[85] = (real_t)(0.0000000000000000e+00); out[86] = (real_t)(0.0000000000000000e+00); out[87] = ((real_t)(5.0000000000000000e-01)*u[3]); out[88] = ((real_t)(5.0000000000000000e-01)*u[2]); out[89] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-u[1])); out[90] = (real_t)(0.0000000000000000e+00); out[91] = (real_t)(0.0000000000000000e+00); out[92] = (real_t)(0.0000000000000000e+00); out[93] = (real_t)(0.0000000000000000e+00); out[94] = (real_t)(0.0000000000000000e+00); out[95] = ((real_t)(5.0000000000000000e-01)*((real_t)(0.0000000000000000e+00)-xd[5])); out[96] = ((real_t)(5.0000000000000000e-01)*xd[4]); out[97] = ((real_t)(5.0000000000000000e-01)*xd[3]); out[98] = (real_t)(0.0000000000000000e+00); out[99] = (real_t)(0.0000000000000000e+00); out[100] = (real_t)(0.0000000000000000e+00); out[101] = (((real_t)(2.0000000000000000e+00)*xd[5])*u[0]); out[102] = (((real_t)(2.0000000000000000e+00)*xd[6])*u[0]); out[103] = (((real_t)(2.0000000000000000e+00)*xd[3])*u[0]); 
out[104] = (((real_t)(2.0000000000000000e+00)*xd[4])*u[0]); out[105] = (real_t)(0.0000000000000000e+00); out[106] = (real_t)(0.0000000000000000e+00); out[107] = (real_t)(0.0000000000000000e+00); out[108] = ((real_t)(2.0000000000000000e+00)*((xd[3]*xd[5])+(xd[4]*xd[6]))); out[109] = (real_t)(0.0000000000000000e+00); out[110] = (real_t)(0.0000000000000000e+00); out[111] = (real_t)(0.0000000000000000e+00); out[112] = (real_t)(0.0000000000000000e+00); out[113] = (real_t)(0.0000000000000000e+00); out[114] = (real_t)(0.0000000000000000e+00); out[115] = (((real_t)(2.0000000000000000e+00)*((real_t)(0.0000000000000000e+00)-xd[4]))*u[0]); out[116] = (((real_t)(2.0000000000000000e+00)*((real_t)(0.0000000000000000e+00)-xd[3]))*u[0]); out[117] = (((real_t)(2.0000000000000000e+00)*xd[6])*u[0]); out[118] = (((real_t)(2.0000000000000000e+00)*xd[5])*u[0]); out[119] = (real_t)(0.0000000000000000e+00); out[120] = (real_t)(0.0000000000000000e+00); out[121] = (real_t)(0.0000000000000000e+00); out[122] = ((real_t)(2.0000000000000000e+00)*((xd[5]*xd[6])-(xd[3]*xd[4]))); out[123] = (real_t)(0.0000000000000000e+00); out[124] = (real_t)(0.0000000000000000e+00); out[125] = (real_t)(0.0000000000000000e+00); out[126] = (real_t)(0.0000000000000000e+00); out[127] = (real_t)(0.0000000000000000e+00); out[128] = (real_t)(0.0000000000000000e+00); out[129] = (real_t)(0.0000000000000000e+00); out[130] = (((real_t)(0.0000000000000000e+00)-(((real_t)(2.0000000000000000e+00)*xd[4])+((real_t)(2.0000000000000000e+00)*xd[4])))*u[0]); out[131] = (((real_t)(0.0000000000000000e+00)-(((real_t)(2.0000000000000000e+00)*xd[5])+((real_t)(2.0000000000000000e+00)*xd[5])))*u[0]); out[132] = (real_t)(0.0000000000000000e+00); out[133] = (real_t)(0.0000000000000000e+00); out[134] = (real_t)(0.0000000000000000e+00); out[135] = (real_t)(0.0000000000000000e+00); out[136] = (((real_t)(1.0000000000000000e+00)-(((real_t)(2.0000000000000000e+00)*xd[4])*xd[4]))-(((real_t)(2.0000000000000000e+00)*xd[5])*xd[5])); out[137] = 
(real_t)(0.0000000000000000e+00); out[138] = (real_t)(0.0000000000000000e+00); out[139] = (real_t)(0.0000000000000000e+00); } void acado_solve_dim20_triangular( real_t* const A, real_t* const b ) { b[19] = b[19]/A[399]; b[18] -= + A[379]*b[19]; b[18] = b[18]/A[378]; b[17] -= + A[359]*b[19]; b[17] -= + A[358]*b[18]; b[17] = b[17]/A[357]; b[16] -= + A[339]*b[19]; b[16] -= + A[338]*b[18]; b[16] -= + A[337]*b[17]; b[16] = b[16]/A[336]; b[15] -= + A[319]*b[19]; b[15] -= + A[318]*b[18]; b[15] -= + A[317]*b[17]; b[15] -= + A[316]*b[16]; b[15] = b[15]/A[315]; b[14] -= + A[299]*b[19]; b[14] -= + A[298]*b[18]; b[14] -= + A[297]*b[17]; b[14] -= + A[296]*b[16]; b[14] -= + A[295]*b[15]; b[14] = b[14]/A[294]; b[13] -= + A[279]*b[19]; b[13] -= + A[278]*b[18]; b[13] -= + A[277]*b[17]; b[13] -= + A[276]*b[16]; b[13] -= + A[275]*b[15]; b[13] -= + A[274]*b[14]; b[13] = b[13]/A[273]; b[12] -= + A[259]*b[19]; b[12] -= + A[258]*b[18]; b[12] -= + A[257]*b[17]; b[12] -= + A[256]*b[16]; b[12] -= + A[255]*b[15]; b[12] -= + A[254]*b[14]; b[12] -= + A[253]*b[13]; b[12] = b[12]/A[252]; b[11] -= + A[239]*b[19]; b[11] -= + A[238]*b[18]; b[11] -= + A[237]*b[17]; b[11] -= + A[236]*b[16]; b[11] -= + A[235]*b[15]; b[11] -= + A[234]*b[14]; b[11] -= + A[233]*b[13]; b[11] -= + A[232]*b[12]; b[11] = b[11]/A[231]; b[10] -= + A[219]*b[19]; b[10] -= + A[218]*b[18]; b[10] -= + A[217]*b[17]; b[10] -= + A[216]*b[16]; b[10] -= + A[215]*b[15]; b[10] -= + A[214]*b[14]; b[10] -= + A[213]*b[13]; b[10] -= + A[212]*b[12]; b[10] -= + A[211]*b[11]; b[10] = b[10]/A[210]; b[9] -= + A[199]*b[19]; b[9] -= + A[198]*b[18]; b[9] -= + A[197]*b[17]; b[9] -= + A[196]*b[16]; b[9] -= + A[195]*b[15]; b[9] -= + A[194]*b[14]; b[9] -= + A[193]*b[13]; b[9] -= + A[192]*b[12]; b[9] -= + A[191]*b[11]; b[9] -= + A[190]*b[10]; b[9] = b[9]/A[189]; b[8] -= + A[179]*b[19]; b[8] -= + A[178]*b[18]; b[8] -= + A[177]*b[17]; b[8] -= + A[176]*b[16]; b[8] -= + A[175]*b[15]; b[8] -= + A[174]*b[14]; b[8] -= + A[173]*b[13]; b[8] -= + A[172]*b[12]; b[8] 
-= + A[171]*b[11]; b[8] -= + A[170]*b[10]; b[8] -= + A[169]*b[9]; b[8] = b[8]/A[168]; b[7] -= + A[159]*b[19]; b[7] -= + A[158]*b[18]; b[7] -= + A[157]*b[17]; b[7] -= + A[156]*b[16]; b[7] -= + A[155]*b[15]; b[7] -= + A[154]*b[14]; b[7] -= + A[153]*b[13]; b[7] -= + A[152]*b[12]; b[7] -= + A[151]*b[11]; b[7] -= + A[150]*b[10]; b[7] -= + A[149]*b[9]; b[7] -= + A[148]*b[8]; b[7] = b[7]/A[147]; b[6] -= + A[139]*b[19]; b[6] -= + A[138]*b[18]; b[6] -= + A[137]*b[17]; b[6] -= + A[136]*b[16]; b[6] -= + A[135]*b[15]; b[6] -= + A[134]*b[14]; b[6] -= + A[133]*b[13]; b[6] -= + A[132]*b[12]; b[6] -= + A[131]*b[11]; b[6] -= + A[130]*b[10]; b[6] -= + A[129]*b[9]; b[6] -= + A[128]*b[8]; b[6] -= + A[127]*b[7]; b[6] = b[6]/A[126]; b[5] -= + A[119]*b[19]; b[5] -= + A[118]*b[18]; b[5] -= + A[117]*b[17]; b[5] -= + A[116]*b[16]; b[5] -= + A[115]*b[15]; b[5] -= + A[114]*b[14]; b[5] -= + A[113]*b[13]; b[5] -= + A[112]*b[12]; b[5] -= + A[111]*b[11]; b[5] -= + A[110]*b[10]; b[5] -= + A[109]*b[9]; b[5] -= + A[108]*b[8]; b[5] -= + A[107]*b[7]; b[5] -= + A[106]*b[6]; b[5] = b[5]/A[105]; b[4] -= + A[99]*b[19]; b[4] -= + A[98]*b[18]; b[4] -= + A[97]*b[17]; b[4] -= + A[96]*b[16]; b[4] -= + A[95]*b[15]; b[4] -= + A[94]*b[14]; b[4] -= + A[93]*b[13]; b[4] -= + A[92]*b[12]; b[4] -= + A[91]*b[11]; b[4] -= + A[90]*b[10]; b[4] -= + A[89]*b[9]; b[4] -= + A[88]*b[8]; b[4] -= + A[87]*b[7]; b[4] -= + A[86]*b[6]; b[4] -= + A[85]*b[5]; b[4] = b[4]/A[84]; b[3] -= + A[79]*b[19]; b[3] -= + A[78]*b[18]; b[3] -= + A[77]*b[17]; b[3] -= + A[76]*b[16]; b[3] -= + A[75]*b[15]; b[3] -= + A[74]*b[14]; b[3] -= + A[73]*b[13]; b[3] -= + A[72]*b[12]; b[3] -= + A[71]*b[11]; b[3] -= + A[70]*b[10]; b[3] -= + A[69]*b[9]; b[3] -= + A[68]*b[8]; b[3] -= + A[67]*b[7]; b[3] -= + A[66]*b[6]; b[3] -= + A[65]*b[5]; b[3] -= + A[64]*b[4]; b[3] = b[3]/A[63]; b[2] -= + A[59]*b[19]; b[2] -= + A[58]*b[18]; b[2] -= + A[57]*b[17]; b[2] -= + A[56]*b[16]; b[2] -= + A[55]*b[15]; b[2] -= + A[54]*b[14]; b[2] -= + A[53]*b[13]; b[2] -= + A[52]*b[12]; 
b[2] -= + A[51]*b[11]; b[2] -= + A[50]*b[10]; b[2] -= + A[49]*b[9]; b[2] -= + A[48]*b[8]; b[2] -= + A[47]*b[7]; b[2] -= + A[46]*b[6]; b[2] -= + A[45]*b[5]; b[2] -= + A[44]*b[4]; b[2] -= + A[43]*b[3]; b[2] = b[2]/A[42]; b[1] -= + A[39]*b[19]; b[1] -= + A[38]*b[18]; b[1] -= + A[37]*b[17]; b[1] -= + A[36]*b[16]; b[1] -= + A[35]*b[15]; b[1] -= + A[34]*b[14]; b[1] -= + A[33]*b[13]; b[1] -= + A[32]*b[12]; b[1] -= + A[31]*b[11]; b[1] -= + A[30]*b[10]; b[1] -= + A[29]*b[9]; b[1] -= + A[28]*b[8]; b[1] -= + A[27]*b[7]; b[1] -= + A[26]*b[6]; b[1] -= + A[25]*b[5]; b[1] -= + A[24]*b[4]; b[1] -= + A[23]*b[3]; b[1] -= + A[22]*b[2]; b[1] = b[1]/A[21]; b[0] -= + A[19]*b[19]; b[0] -= + A[18]*b[18]; b[0] -= + A[17]*b[17]; b[0] -= + A[16]*b[16]; b[0] -= + A[15]*b[15]; b[0] -= + A[14]*b[14]; b[0] -= + A[13]*b[13]; b[0] -= + A[12]*b[12]; b[0] -= + A[11]*b[11]; b[0] -= + A[10]*b[10]; b[0] -= + A[9]*b[9]; b[0] -= + A[8]*b[8]; b[0] -= + A[7]*b[7]; b[0] -= + A[6]*b[6]; b[0] -= + A[5]*b[5]; b[0] -= + A[4]*b[4]; b[0] -= + A[3]*b[3]; b[0] -= + A[2]*b[2]; b[0] -= + A[1]*b[1]; b[0] = b[0]/A[0]; } real_t acado_solve_dim20_system( real_t* const A, real_t* const b, int* const rk_perm ) { real_t det; int i; int j; int k; int indexMax; int intSwap; real_t valueMax; real_t temp; for (i = 0; i < 20; ++i) { rk_perm[i] = i; } det = 1.0000000000000000e+00; for( i=0; i < (19); i++ ) { indexMax = i; valueMax = fabs(A[i*20+i]); for( j=(i+1); j < 20; j++ ) { temp = fabs(A[j*20+i]); if( temp > valueMax ) { indexMax = j; valueMax = temp; } } if( indexMax > i ) { for (k = 0; k < 20; ++k) { rk_dim20_swap = A[i*20+k]; A[i*20+k] = A[indexMax*20+k]; A[indexMax*20+k] = rk_dim20_swap; } rk_dim20_swap = b[i]; b[i] = b[indexMax]; b[indexMax] = rk_dim20_swap; intSwap = rk_perm[i]; rk_perm[i] = rk_perm[indexMax]; rk_perm[indexMax] = intSwap; } det *= A[i*20+i]; for( j=i+1; j < 20; j++ ) { A[j*20+i] = -A[j*20+i]/A[i*20+i]; for( k=i+1; k < 20; k++ ) { A[j*20+k] += A[j*20+i] * A[i*20+k]; } b[j] += A[j*20+i] * b[i]; } } det 
*= A[399]; det = fabs(det); acado_solve_dim20_triangular( A, b ); return det; } void acado_solve_dim20_system_reuse( real_t* const A, real_t* const b, int* const rk_perm ) { rk_dim20_bPerm[0] = b[rk_perm[0]]; rk_dim20_bPerm[1] = b[rk_perm[1]]; rk_dim20_bPerm[2] = b[rk_perm[2]]; rk_dim20_bPerm[3] = b[rk_perm[3]]; rk_dim20_bPerm[4] = b[rk_perm[4]]; rk_dim20_bPerm[5] = b[rk_perm[5]]; rk_dim20_bPerm[6] = b[rk_perm[6]]; rk_dim20_bPerm[7] = b[rk_perm[7]]; rk_dim20_bPerm[8] = b[rk_perm[8]]; rk_dim20_bPerm[9] = b[rk_perm[9]]; rk_dim20_bPerm[10] = b[rk_perm[10]]; rk_dim20_bPerm[11] = b[rk_perm[11]]; rk_dim20_bPerm[12] = b[rk_perm[12]]; rk_dim20_bPerm[13] = b[rk_perm[13]]; rk_dim20_bPerm[14] = b[rk_perm[14]]; rk_dim20_bPerm[15] = b[rk_perm[15]]; rk_dim20_bPerm[16] = b[rk_perm[16]]; rk_dim20_bPerm[17] = b[rk_perm[17]]; rk_dim20_bPerm[18] = b[rk_perm[18]]; rk_dim20_bPerm[19] = b[rk_perm[19]]; rk_dim20_bPerm[1] += A[20]*rk_dim20_bPerm[0]; rk_dim20_bPerm[2] += A[40]*rk_dim20_bPerm[0]; rk_dim20_bPerm[2] += A[41]*rk_dim20_bPerm[1]; rk_dim20_bPerm[3] += A[60]*rk_dim20_bPerm[0]; rk_dim20_bPerm[3] += A[61]*rk_dim20_bPerm[1]; rk_dim20_bPerm[3] += A[62]*rk_dim20_bPerm[2]; rk_dim20_bPerm[4] += A[80]*rk_dim20_bPerm[0]; rk_dim20_bPerm[4] += A[81]*rk_dim20_bPerm[1]; rk_dim20_bPerm[4] += A[82]*rk_dim20_bPerm[2]; rk_dim20_bPerm[4] += A[83]*rk_dim20_bPerm[3]; rk_dim20_bPerm[5] += A[100]*rk_dim20_bPerm[0]; rk_dim20_bPerm[5] += A[101]*rk_dim20_bPerm[1]; rk_dim20_bPerm[5] += A[102]*rk_dim20_bPerm[2]; rk_dim20_bPerm[5] += A[103]*rk_dim20_bPerm[3]; rk_dim20_bPerm[5] += A[104]*rk_dim20_bPerm[4]; rk_dim20_bPerm[6] += A[120]*rk_dim20_bPerm[0]; rk_dim20_bPerm[6] += A[121]*rk_dim20_bPerm[1]; rk_dim20_bPerm[6] += A[122]*rk_dim20_bPerm[2]; rk_dim20_bPerm[6] += A[123]*rk_dim20_bPerm[3]; rk_dim20_bPerm[6] += A[124]*rk_dim20_bPerm[4]; rk_dim20_bPerm[6] += A[125]*rk_dim20_bPerm[5]; rk_dim20_bPerm[7] += A[140]*rk_dim20_bPerm[0]; rk_dim20_bPerm[7] += A[141]*rk_dim20_bPerm[1]; rk_dim20_bPerm[7] += 
A[142]*rk_dim20_bPerm[2]; rk_dim20_bPerm[7] += A[143]*rk_dim20_bPerm[3]; rk_dim20_bPerm[7] += A[144]*rk_dim20_bPerm[4]; rk_dim20_bPerm[7] += A[145]*rk_dim20_bPerm[5]; rk_dim20_bPerm[7] += A[146]*rk_dim20_bPerm[6]; rk_dim20_bPerm[8] += A[160]*rk_dim20_bPerm[0]; rk_dim20_bPerm[8] += A[161]*rk_dim20_bPerm[1]; rk_dim20_bPerm[8] += A[162]*rk_dim20_bPerm[2]; rk_dim20_bPerm[8] += A[163]*rk_dim20_bPerm[3]; rk_dim20_bPerm[8] += A[164]*rk_dim20_bPerm[4]; rk_dim20_bPerm[8] += A[165]*rk_dim20_bPerm[5]; rk_dim20_bPerm[8] += A[166]*rk_dim20_bPerm[6]; rk_dim20_bPerm[8] += A[167]*rk_dim20_bPerm[7]; rk_dim20_bPerm[9] += A[180]*rk_dim20_bPerm[0]; rk_dim20_bPerm[9] += A[181]*rk_dim20_bPerm[1]; rk_dim20_bPerm[9] += A[182]*rk_dim20_bPerm[2]; rk_dim20_bPerm[9] += A[183]*rk_dim20_bPerm[3]; rk_dim20_bPerm[9] += A[184]*rk_dim20_bPerm[4]; rk_dim20_bPerm[9] += A[185]*rk_dim20_bPerm[5]; rk_dim20_bPerm[9] += A[186]*rk_dim20_bPerm[6]; rk_dim20_bPerm[9] += A[187]*rk_dim20_bPerm[7]; rk_dim20_bPerm[9] += A[188]*rk_dim20_bPerm[8]; rk_dim20_bPerm[10] += A[200]*rk_dim20_bPerm[0]; rk_dim20_bPerm[10] += A[201]*rk_dim20_bPerm[1]; rk_dim20_bPerm[10] += A[202]*rk_dim20_bPerm[2]; rk_dim20_bPerm[10] += A[203]*rk_dim20_bPerm[3]; rk_dim20_bPerm[10] += A[204]*rk_dim20_bPerm[4]; rk_dim20_bPerm[10] += A[205]*rk_dim20_bPerm[5]; rk_dim20_bPerm[10] += A[206]*rk_dim20_bPerm[6]; rk_dim20_bPerm[10] += A[207]*rk_dim20_bPerm[7]; rk_dim20_bPerm[10] += A[208]*rk_dim20_bPerm[8]; rk_dim20_bPerm[10] += A[209]*rk_dim20_bPerm[9]; rk_dim20_bPerm[11] += A[220]*rk_dim20_bPerm[0]; rk_dim20_bPerm[11] += A[221]*rk_dim20_bPerm[1]; rk_dim20_bPerm[11] += A[222]*rk_dim20_bPerm[2]; rk_dim20_bPerm[11] += A[223]*rk_dim20_bPerm[3]; rk_dim20_bPerm[11] += A[224]*rk_dim20_bPerm[4]; rk_dim20_bPerm[11] += A[225]*rk_dim20_bPerm[5]; rk_dim20_bPerm[11] += A[226]*rk_dim20_bPerm[6]; rk_dim20_bPerm[11] += A[227]*rk_dim20_bPerm[7]; rk_dim20_bPerm[11] += A[228]*rk_dim20_bPerm[8]; rk_dim20_bPerm[11] += A[229]*rk_dim20_bPerm[9]; rk_dim20_bPerm[11] += 
A[230]*rk_dim20_bPerm[10]; rk_dim20_bPerm[12] += A[240]*rk_dim20_bPerm[0]; rk_dim20_bPerm[12] += A[241]*rk_dim20_bPerm[1]; rk_dim20_bPerm[12] += A[242]*rk_dim20_bPerm[2]; rk_dim20_bPerm[12] += A[243]*rk_dim20_bPerm[3]; rk_dim20_bPerm[12] += A[244]*rk_dim20_bPerm[4]; rk_dim20_bPerm[12] += A[245]*rk_dim20_bPerm[5]; rk_dim20_bPerm[12] += A[246]*rk_dim20_bPerm[6]; rk_dim20_bPerm[12] += A[247]*rk_dim20_bPerm[7]; rk_dim20_bPerm[12] += A[248]*rk_dim20_bPerm[8]; rk_dim20_bPerm[12] += A[249]*rk_dim20_bPerm[9]; rk_dim20_bPerm[12] += A[250]*rk_dim20_bPerm[10]; rk_dim20_bPerm[12] += A[251]*rk_dim20_bPerm[11]; rk_dim20_bPerm[13] += A[260]*rk_dim20_bPerm[0]; rk_dim20_bPerm[13] += A[261]*rk_dim20_bPerm[1]; rk_dim20_bPerm[13] += A[262]*rk_dim20_bPerm[2]; rk_dim20_bPerm[13] += A[263]*rk_dim20_bPerm[3]; rk_dim20_bPerm[13] += A[264]*rk_dim20_bPerm[4]; rk_dim20_bPerm[13] += A[265]*rk_dim20_bPerm[5]; rk_dim20_bPerm[13] += A[266]*rk_dim20_bPerm[6]; rk_dim20_bPerm[13] += A[267]*rk_dim20_bPerm[7]; rk_dim20_bPerm[13] += A[268]*rk_dim20_bPerm[8]; rk_dim20_bPerm[13] += A[269]*rk_dim20_bPerm[9]; rk_dim20_bPerm[13] += A[270]*rk_dim20_bPerm[10]; rk_dim20_bPerm[13] += A[271]*rk_dim20_bPerm[11]; rk_dim20_bPerm[13] += A[272]*rk_dim20_bPerm[12]; rk_dim20_bPerm[14] += A[280]*rk_dim20_bPerm[0]; rk_dim20_bPerm[14] += A[281]*rk_dim20_bPerm[1]; rk_dim20_bPerm[14] += A[282]*rk_dim20_bPerm[2]; rk_dim20_bPerm[14] += A[283]*rk_dim20_bPerm[3]; rk_dim20_bPerm[14] += A[284]*rk_dim20_bPerm[4]; rk_dim20_bPerm[14] += A[285]*rk_dim20_bPerm[5]; rk_dim20_bPerm[14] += A[286]*rk_dim20_bPerm[6]; rk_dim20_bPerm[14] += A[287]*rk_dim20_bPerm[7]; rk_dim20_bPerm[14] += A[288]*rk_dim20_bPerm[8]; rk_dim20_bPerm[14] += A[289]*rk_dim20_bPerm[9]; rk_dim20_bPerm[14] += A[290]*rk_dim20_bPerm[10]; rk_dim20_bPerm[14] += A[291]*rk_dim20_bPerm[11]; rk_dim20_bPerm[14] += A[292]*rk_dim20_bPerm[12]; rk_dim20_bPerm[14] += A[293]*rk_dim20_bPerm[13]; rk_dim20_bPerm[15] += A[300]*rk_dim20_bPerm[0]; rk_dim20_bPerm[15] += 
A[301]*rk_dim20_bPerm[1]; rk_dim20_bPerm[15] += A[302]*rk_dim20_bPerm[2]; rk_dim20_bPerm[15] += A[303]*rk_dim20_bPerm[3]; rk_dim20_bPerm[15] += A[304]*rk_dim20_bPerm[4]; rk_dim20_bPerm[15] += A[305]*rk_dim20_bPerm[5]; rk_dim20_bPerm[15] += A[306]*rk_dim20_bPerm[6]; rk_dim20_bPerm[15] += A[307]*rk_dim20_bPerm[7]; rk_dim20_bPerm[15] += A[308]*rk_dim20_bPerm[8]; rk_dim20_bPerm[15] += A[309]*rk_dim20_bPerm[9]; rk_dim20_bPerm[15] += A[310]*rk_dim20_bPerm[10]; rk_dim20_bPerm[15] += A[311]*rk_dim20_bPerm[11]; rk_dim20_bPerm[15] += A[312]*rk_dim20_bPerm[12]; rk_dim20_bPerm[15] += A[313]*rk_dim20_bPerm[13]; rk_dim20_bPerm[15] += A[314]*rk_dim20_bPerm[14]; rk_dim20_bPerm[16] += A[320]*rk_dim20_bPerm[0]; rk_dim20_bPerm[16] += A[321]*rk_dim20_bPerm[1]; rk_dim20_bPerm[16] += A[322]*rk_dim20_bPerm[2]; rk_dim20_bPerm[16] += A[323]*rk_dim20_bPerm[3]; rk_dim20_bPerm[16] += A[324]*rk_dim20_bPerm[4]; rk_dim20_bPerm[16] += A[325]*rk_dim20_bPerm[5]; rk_dim20_bPerm[16] += A[326]*rk_dim20_bPerm[6]; rk_dim20_bPerm[16] += A[327]*rk_dim20_bPerm[7]; rk_dim20_bPerm[16] += A[328]*rk_dim20_bPerm[8]; rk_dim20_bPerm[16] += A[329]*rk_dim20_bPerm[9]; rk_dim20_bPerm[16] += A[330]*rk_dim20_bPerm[10]; rk_dim20_bPerm[16] += A[331]*rk_dim20_bPerm[11]; rk_dim20_bPerm[16] += A[332]*rk_dim20_bPerm[12]; rk_dim20_bPerm[16] += A[333]*rk_dim20_bPerm[13]; rk_dim20_bPerm[16] += A[334]*rk_dim20_bPerm[14]; rk_dim20_bPerm[16] += A[335]*rk_dim20_bPerm[15]; rk_dim20_bPerm[17] += A[340]*rk_dim20_bPerm[0]; rk_dim20_bPerm[17] += A[341]*rk_dim20_bPerm[1]; rk_dim20_bPerm[17] += A[342]*rk_dim20_bPerm[2]; rk_dim20_bPerm[17] += A[343]*rk_dim20_bPerm[3]; rk_dim20_bPerm[17] += A[344]*rk_dim20_bPerm[4]; rk_dim20_bPerm[17] += A[345]*rk_dim20_bPerm[5]; rk_dim20_bPerm[17] += A[346]*rk_dim20_bPerm[6]; rk_dim20_bPerm[17] += A[347]*rk_dim20_bPerm[7]; rk_dim20_bPerm[17] += A[348]*rk_dim20_bPerm[8]; rk_dim20_bPerm[17] += A[349]*rk_dim20_bPerm[9]; rk_dim20_bPerm[17] += A[350]*rk_dim20_bPerm[10]; rk_dim20_bPerm[17] += 
A[351]*rk_dim20_bPerm[11]; rk_dim20_bPerm[17] += A[352]*rk_dim20_bPerm[12]; rk_dim20_bPerm[17] += A[353]*rk_dim20_bPerm[13]; rk_dim20_bPerm[17] += A[354]*rk_dim20_bPerm[14]; rk_dim20_bPerm[17] += A[355]*rk_dim20_bPerm[15]; rk_dim20_bPerm[17] += A[356]*rk_dim20_bPerm[16]; rk_dim20_bPerm[18] += A[360]*rk_dim20_bPerm[0]; rk_dim20_bPerm[18] += A[361]*rk_dim20_bPerm[1]; rk_dim20_bPerm[18] += A[362]*rk_dim20_bPerm[2]; rk_dim20_bPerm[18] += A[363]*rk_dim20_bPerm[3]; rk_dim20_bPerm[18] += A[364]*rk_dim20_bPerm[4]; rk_dim20_bPerm[18] += A[365]*rk_dim20_bPerm[5]; rk_dim20_bPerm[18] += A[366]*rk_dim20_bPerm[6]; rk_dim20_bPerm[18] += A[367]*rk_dim20_bPerm[7]; rk_dim20_bPerm[18] += A[368]*rk_dim20_bPerm[8]; rk_dim20_bPerm[18] += A[369]*rk_dim20_bPerm[9]; rk_dim20_bPerm[18] += A[370]*rk_dim20_bPerm[10]; rk_dim20_bPerm[18] += A[371]*rk_dim20_bPerm[11]; rk_dim20_bPerm[18] += A[372]*rk_dim20_bPerm[12]; rk_dim20_bPerm[18] += A[373]*rk_dim20_bPerm[13]; rk_dim20_bPerm[18] += A[374]*rk_dim20_bPerm[14]; rk_dim20_bPerm[18] += A[375]*rk_dim20_bPerm[15]; rk_dim20_bPerm[18] += A[376]*rk_dim20_bPerm[16]; rk_dim20_bPerm[18] += A[377]*rk_dim20_bPerm[17]; rk_dim20_bPerm[19] += A[380]*rk_dim20_bPerm[0]; rk_dim20_bPerm[19] += A[381]*rk_dim20_bPerm[1]; rk_dim20_bPerm[19] += A[382]*rk_dim20_bPerm[2]; rk_dim20_bPerm[19] += A[383]*rk_dim20_bPerm[3]; rk_dim20_bPerm[19] += A[384]*rk_dim20_bPerm[4]; rk_dim20_bPerm[19] += A[385]*rk_dim20_bPerm[5]; rk_dim20_bPerm[19] += A[386]*rk_dim20_bPerm[6]; rk_dim20_bPerm[19] += A[387]*rk_dim20_bPerm[7]; rk_dim20_bPerm[19] += A[388]*rk_dim20_bPerm[8]; rk_dim20_bPerm[19] += A[389]*rk_dim20_bPerm[9]; rk_dim20_bPerm[19] += A[390]*rk_dim20_bPerm[10]; rk_dim20_bPerm[19] += A[391]*rk_dim20_bPerm[11]; rk_dim20_bPerm[19] += A[392]*rk_dim20_bPerm[12]; rk_dim20_bPerm[19] += A[393]*rk_dim20_bPerm[13]; rk_dim20_bPerm[19] += A[394]*rk_dim20_bPerm[14]; rk_dim20_bPerm[19] += A[395]*rk_dim20_bPerm[15]; rk_dim20_bPerm[19] += A[396]*rk_dim20_bPerm[16]; rk_dim20_bPerm[19] += 
A[397]*rk_dim20_bPerm[17]; rk_dim20_bPerm[19] += A[398]*rk_dim20_bPerm[18]; acado_solve_dim20_triangular( A, rk_dim20_bPerm ); b[0] = rk_dim20_bPerm[0]; b[1] = rk_dim20_bPerm[1]; b[2] = rk_dim20_bPerm[2]; b[3] = rk_dim20_bPerm[3]; b[4] = rk_dim20_bPerm[4]; b[5] = rk_dim20_bPerm[5]; b[6] = rk_dim20_bPerm[6]; b[7] = rk_dim20_bPerm[7]; b[8] = rk_dim20_bPerm[8]; b[9] = rk_dim20_bPerm[9]; b[10] = rk_dim20_bPerm[10]; b[11] = rk_dim20_bPerm[11]; b[12] = rk_dim20_bPerm[12]; b[13] = rk_dim20_bPerm[13]; b[14] = rk_dim20_bPerm[14]; b[15] = rk_dim20_bPerm[15]; b[16] = rk_dim20_bPerm[16]; b[17] = rk_dim20_bPerm[17]; b[18] = rk_dim20_bPerm[18]; b[19] = rk_dim20_bPerm[19]; } /** Matrix of size: 2 x 2 (row major format) */ static const real_t acado_Ah_mat[ 4 ] = { 2.5000000000000001e-02, 5.3867513459481292e-02, -3.8675134594812867e-03, 2.5000000000000001e-02 }; /* Fixed step size:0.1 */ int acado_integrate( real_t* const rk_eta, int resetIntegrator ) { int error; int i; int j; int k; int run; int run1; int tmp_index1; int tmp_index2; real_t det; rk_ttt = 0.0000000000000000e+00; rk_xxx[10] = rk_eta[150]; rk_xxx[11] = rk_eta[151]; rk_xxx[12] = rk_eta[152]; rk_xxx[13] = rk_eta[153]; rk_xxx[14] = rk_eta[154]; rk_xxx[15] = rk_eta[155]; rk_xxx[16] = rk_eta[156]; rk_xxx[17] = rk_eta[157]; rk_xxx[18] = rk_eta[158]; rk_xxx[19] = rk_eta[159]; rk_xxx[20] = rk_eta[160]; rk_xxx[21] = rk_eta[161]; rk_xxx[22] = rk_eta[162]; rk_xxx[23] = rk_eta[163]; for (run = 0; run < 1; ++run) { if( resetIntegrator ) { for (i = 0; i < 1; ++i) { for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 10; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 140 ]) ); for (j = 0; j < 10; ++j) { tmp_index1 = (run1 * 10) + (j); rk_A[tmp_index1 * 20] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14)]; rk_A[tmp_index1 * 20 + 1] = + 
acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 1)]; rk_A[tmp_index1 * 20 + 2] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 2)]; rk_A[tmp_index1 * 20 + 3] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 3)]; rk_A[tmp_index1 * 20 + 4] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 4)]; rk_A[tmp_index1 * 20 + 5] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 5)]; rk_A[tmp_index1 * 20 + 6] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 6)]; rk_A[tmp_index1 * 20 + 7] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 7)]; rk_A[tmp_index1 * 20 + 8] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 8)]; rk_A[tmp_index1 * 20 + 9] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 9)]; if( 0 == run1 ) rk_A[(tmp_index1 * 20) + (j)] -= 1.0000000000000000e+00; rk_A[tmp_index1 * 20 + 10] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14)]; rk_A[tmp_index1 * 20 + 11] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 1)]; rk_A[tmp_index1 * 20 + 12] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 2)]; rk_A[tmp_index1 * 20 + 13] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 3)]; rk_A[tmp_index1 * 20 + 14] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 4)]; rk_A[tmp_index1 * 20 + 15] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 5)]; rk_A[tmp_index1 * 20 + 16] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 6)]; rk_A[tmp_index1 * 20 + 17] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 7)]; rk_A[tmp_index1 * 20 + 18] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 8)]; rk_A[tmp_index1 * 20 + 19] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 9)]; if( 1 == run1 ) rk_A[(tmp_index1 * 20) + (j + 10)] -= 
1.0000000000000000e+00; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 10] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 10 + 1] = rk_kkk[run1 + 2] - rk_rhsTemp[1]; rk_b[run1 * 10 + 2] = rk_kkk[run1 + 4] - rk_rhsTemp[2]; rk_b[run1 * 10 + 3] = rk_kkk[run1 + 6] - rk_rhsTemp[3]; rk_b[run1 * 10 + 4] = rk_kkk[run1 + 8] - rk_rhsTemp[4]; rk_b[run1 * 10 + 5] = rk_kkk[run1 + 10] - rk_rhsTemp[5]; rk_b[run1 * 10 + 6] = rk_kkk[run1 + 12] - rk_rhsTemp[6]; rk_b[run1 * 10 + 7] = rk_kkk[run1 + 14] - rk_rhsTemp[7]; rk_b[run1 * 10 + 8] = rk_kkk[run1 + 16] - rk_rhsTemp[8]; rk_b[run1 * 10 + 9] = rk_kkk[run1 + 18] - rk_rhsTemp[9]; } det = acado_solve_dim20_system( rk_A, rk_b, rk_dim20_perm ); for (j = 0; j < 2; ++j) { rk_kkk[j] += rk_b[j * 10]; rk_kkk[j + 2] += rk_b[j * 10 + 1]; rk_kkk[j + 4] += rk_b[j * 10 + 2]; rk_kkk[j + 6] += rk_b[j * 10 + 3]; rk_kkk[j + 8] += rk_b[j * 10 + 4]; rk_kkk[j + 10] += rk_b[j * 10 + 5]; rk_kkk[j + 12] += rk_b[j * 10 + 6]; rk_kkk[j + 14] += rk_b[j * 10 + 7]; rk_kkk[j + 16] += rk_b[j * 10 + 8]; rk_kkk[j + 18] += rk_b[j * 10 + 9]; } } } for (i = 0; i < 5; ++i) { for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 10; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 10] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 10 + 1] = rk_kkk[run1 + 2] - rk_rhsTemp[1]; rk_b[run1 * 10 + 2] = rk_kkk[run1 + 4] - rk_rhsTemp[2]; rk_b[run1 * 10 + 3] = rk_kkk[run1 + 6] - rk_rhsTemp[3]; rk_b[run1 * 10 + 4] = rk_kkk[run1 + 8] - rk_rhsTemp[4]; rk_b[run1 * 10 + 5] = rk_kkk[run1 + 10] - rk_rhsTemp[5]; rk_b[run1 * 10 + 6] = rk_kkk[run1 + 12] - rk_rhsTemp[6]; rk_b[run1 * 10 + 7] = rk_kkk[run1 + 14] - rk_rhsTemp[7]; rk_b[run1 * 10 + 8] = rk_kkk[run1 + 16] - rk_rhsTemp[8]; rk_b[run1 * 10 + 9] = rk_kkk[run1 + 18] - rk_rhsTemp[9]; } acado_solve_dim20_system_reuse( rk_A, rk_b, rk_dim20_perm ); for (j = 0; j < 2; ++j) { 
rk_kkk[j] += rk_b[j * 10]; rk_kkk[j + 2] += rk_b[j * 10 + 1]; rk_kkk[j + 4] += rk_b[j * 10 + 2]; rk_kkk[j + 6] += rk_b[j * 10 + 3]; rk_kkk[j + 8] += rk_b[j * 10 + 4]; rk_kkk[j + 10] += rk_b[j * 10 + 5]; rk_kkk[j + 12] += rk_b[j * 10 + 6]; rk_kkk[j + 14] += rk_b[j * 10 + 7]; rk_kkk[j + 16] += rk_b[j * 10 + 8]; rk_kkk[j + 18] += rk_b[j * 10 + 9]; } } for (run1 = 0; run1 < 2; ++run1) { for (j = 0; j < 10; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1 * 2]*rk_kkk[tmp_index1 * 2]; rk_xxx[j] += + acado_Ah_mat[run1 * 2 + 1]*rk_kkk[tmp_index1 * 2 + 1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 140 ]) ); for (j = 0; j < 10; ++j) { tmp_index1 = (run1 * 10) + (j); rk_A[tmp_index1 * 20] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14)]; rk_A[tmp_index1 * 20 + 1] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 1)]; rk_A[tmp_index1 * 20 + 2] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 2)]; rk_A[tmp_index1 * 20 + 3] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 3)]; rk_A[tmp_index1 * 20 + 4] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 4)]; rk_A[tmp_index1 * 20 + 5] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 5)]; rk_A[tmp_index1 * 20 + 6] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 6)]; rk_A[tmp_index1 * 20 + 7] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 7)]; rk_A[tmp_index1 * 20 + 8] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 8)]; rk_A[tmp_index1 * 20 + 9] = + acado_Ah_mat[run1 * 2]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 9)]; if( 0 == run1 ) rk_A[(tmp_index1 * 20) + (j)] -= 1.0000000000000000e+00; rk_A[tmp_index1 * 20 + 10] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14)]; rk_A[tmp_index1 * 20 + 11] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 1)]; rk_A[tmp_index1 * 20 + 12] = + acado_Ah_mat[run1 * 2 + 
1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 2)]; rk_A[tmp_index1 * 20 + 13] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 3)]; rk_A[tmp_index1 * 20 + 14] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 4)]; rk_A[tmp_index1 * 20 + 15] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 5)]; rk_A[tmp_index1 * 20 + 16] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 6)]; rk_A[tmp_index1 * 20 + 17] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 7)]; rk_A[tmp_index1 * 20 + 18] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 8)]; rk_A[tmp_index1 * 20 + 19] = + acado_Ah_mat[run1 * 2 + 1]*rk_diffsTemp2[(run1 * 140) + (j * 14 + 9)]; if( 1 == run1 ) rk_A[(tmp_index1 * 20) + (j + 10)] -= 1.0000000000000000e+00; } } for (run1 = 0; run1 < 10; ++run1) { for (i = 0; i < 2; ++i) { rk_b[i * 10] = - rk_diffsTemp2[(i * 140) + (run1)]; rk_b[i * 10 + 1] = - rk_diffsTemp2[(i * 140) + (run1 + 14)]; rk_b[i * 10 + 2] = - rk_diffsTemp2[(i * 140) + (run1 + 28)]; rk_b[i * 10 + 3] = - rk_diffsTemp2[(i * 140) + (run1 + 42)]; rk_b[i * 10 + 4] = - rk_diffsTemp2[(i * 140) + (run1 + 56)]; rk_b[i * 10 + 5] = - rk_diffsTemp2[(i * 140) + (run1 + 70)]; rk_b[i * 10 + 6] = - rk_diffsTemp2[(i * 140) + (run1 + 84)]; rk_b[i * 10 + 7] = - rk_diffsTemp2[(i * 140) + (run1 + 98)]; rk_b[i * 10 + 8] = - rk_diffsTemp2[(i * 140) + (run1 + 112)]; rk_b[i * 10 + 9] = - rk_diffsTemp2[(i * 140) + (run1 + 126)]; } if( 0 == run1 ) { det = acado_solve_dim20_system( rk_A, rk_b, rk_dim20_perm ); } else { acado_solve_dim20_system_reuse( rk_A, rk_b, rk_dim20_perm ); } for (i = 0; i < 2; ++i) { rk_diffK[i] = rk_b[i * 10]; rk_diffK[i + 2] = rk_b[i * 10 + 1]; rk_diffK[i + 4] = rk_b[i * 10 + 2]; rk_diffK[i + 6] = rk_b[i * 10 + 3]; rk_diffK[i + 8] = rk_b[i * 10 + 4]; rk_diffK[i + 10] = rk_b[i * 10 + 5]; rk_diffK[i + 12] = rk_b[i * 10 + 6]; rk_diffK[i + 14] = rk_b[i * 10 + 7]; rk_diffK[i + 16] = rk_b[i * 
10 + 8]; rk_diffK[i + 18] = rk_b[i * 10 + 9]; } for (i = 0; i < 10; ++i) { rk_diffsNew2[(i * 14) + (run1)] = (i == run1-0); rk_diffsNew2[(i * 14) + (run1)] += + rk_diffK[i * 2]*(real_t)5.0000000000000003e-02 + rk_diffK[i * 2 + 1]*(real_t)5.0000000000000003e-02; } } for (run1 = 0; run1 < 4; ++run1) { for (i = 0; i < 2; ++i) { for (j = 0; j < 10; ++j) { tmp_index1 = (i * 10) + (j); tmp_index2 = (run1) + (j * 14); rk_b[tmp_index1] = - rk_diffsTemp2[(i * 140) + (tmp_index2 + 10)]; } } acado_solve_dim20_system_reuse( rk_A, rk_b, rk_dim20_perm ); for (i = 0; i < 2; ++i) { rk_diffK[i] = rk_b[i * 10]; rk_diffK[i + 2] = rk_b[i * 10 + 1]; rk_diffK[i + 4] = rk_b[i * 10 + 2]; rk_diffK[i + 6] = rk_b[i * 10 + 3]; rk_diffK[i + 8] = rk_b[i * 10 + 4]; rk_diffK[i + 10] = rk_b[i * 10 + 5]; rk_diffK[i + 12] = rk_b[i * 10 + 6]; rk_diffK[i + 14] = rk_b[i * 10 + 7]; rk_diffK[i + 16] = rk_b[i * 10 + 8]; rk_diffK[i + 18] = rk_b[i * 10 + 9]; } for (i = 0; i < 10; ++i) { rk_diffsNew2[(i * 14) + (run1 + 10)] = + rk_diffK[i * 2]*(real_t)5.0000000000000003e-02 + rk_diffK[i * 2 + 1]*(real_t)5.0000000000000003e-02; } } rk_eta[0] += + rk_kkk[0]*(real_t)5.0000000000000003e-02 + rk_kkk[1]*(real_t)5.0000000000000003e-02; rk_eta[1] += + rk_kkk[2]*(real_t)5.0000000000000003e-02 + rk_kkk[3]*(real_t)5.0000000000000003e-02; rk_eta[2] += + rk_kkk[4]*(real_t)5.0000000000000003e-02 + rk_kkk[5]*(real_t)5.0000000000000003e-02; rk_eta[3] += + rk_kkk[6]*(real_t)5.0000000000000003e-02 + rk_kkk[7]*(real_t)5.0000000000000003e-02; rk_eta[4] += + rk_kkk[8]*(real_t)5.0000000000000003e-02 + rk_kkk[9]*(real_t)5.0000000000000003e-02; rk_eta[5] += + rk_kkk[10]*(real_t)5.0000000000000003e-02 + rk_kkk[11]*(real_t)5.0000000000000003e-02; rk_eta[6] += + rk_kkk[12]*(real_t)5.0000000000000003e-02 + rk_kkk[13]*(real_t)5.0000000000000003e-02; rk_eta[7] += + rk_kkk[14]*(real_t)5.0000000000000003e-02 + rk_kkk[15]*(real_t)5.0000000000000003e-02; rk_eta[8] += + rk_kkk[16]*(real_t)5.0000000000000003e-02 + 
rk_kkk[17]*(real_t)5.0000000000000003e-02; rk_eta[9] += + rk_kkk[18]*(real_t)5.0000000000000003e-02 + rk_kkk[19]*(real_t)5.0000000000000003e-02; for (i = 0; i < 10; ++i) { for (j = 0; j < 10; ++j) { tmp_index2 = (j) + (i * 10); rk_eta[tmp_index2 + 10] = rk_diffsNew2[(i * 14) + (j)]; } for (j = 0; j < 4; ++j) { tmp_index2 = (j) + (i * 4); rk_eta[tmp_index2 + 110] = rk_diffsNew2[(i * 14) + (j + 10)]; } } resetIntegrator = 0; rk_ttt += 1.0000000000000000e+00; } for (i = 0; i < 10; ++i) { } if( det < 1e-12 ) { error = 2; } else if( det < 1e-6 ) { error = 1; } else { error = 0; } return error; }
parallel_levelset_distance_calculator.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED ) #define KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "includes/define.h" #include "utilities/geometry_utilities.h" #include "includes/deprecated_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ template< unsigned int TDim> class ParallelDistanceCalculator { public: ///@name Type Definitions ///@{ KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE); /// Pointer definition of ParallelDistanceCalculator KRATOS_CLASS_POINTER_DEFINITION(ParallelDistanceCalculator); ///@} ///@name Life Cycle ///@{ /// Default constructor. ParallelDistanceCalculator() {}; /// Destructor. 
virtual ~ParallelDistanceCalculator() {};

/// Function to calculate a signed distance function suitable for calculations using the Level Set Method.
/// The function assumes it is given a "signed distance" distribution and recomputes the distances,
/// respecting as accurately as possible the position of the zero of the original distribution.
/// @param rModelPart is the ModelPart on which we will operate
/// @param rDistanceVar is the Variable that we will use in calculating the distance
/// @param rAreaVar is the Variable that we will use for L2 projections
/// @param max_levels is the number of maximum "layers" of elements that will be used in the calculation of the distances
/// @param max_distance distances will not be computed after reaching this limit
/// @param Options flags forwarded to the divided-element step; presumably CALCULATE_EXACT_DISTANCES_TO_PLANE
///        selects exact plane distances there — confirm against CalculateExactDistancesOnDividedElements
void CalculateDistances(ModelPart& rModelPart,
                        const Variable<double>& rDistanceVar,
                        const Variable<double>& rAreaVar,
                        const unsigned int max_levels,
                        const double max_distance,
                        Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse())
{
    KRATOS_TRY

    // Verify the required variables are present in the model part
    Check(rModelPart, rDistanceVar, rAreaVar);
    // Reset the working variables before recomputation (see ResetVariables)
    ResetVariables(rModelPart,rDistanceVar, max_distance);
    // Recompute distances on the elements crossed by the zero isosurface
    CalculateExactDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance, Options);
    // Propagate distances outwards element-layer by element-layer, up to max_levels
    ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance);
    // Restore the sign of the distance field
    AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance);

    KRATOS_CATCH("")
}

/// Function to calculate a signed distance function suitable for calculations using the Level Set Method.
/// The difference of this function with the previous one is the fact that it won't recalculate the exact distance
/// in divided elements, in order to preserve the current distance.
///the function assumes given a "signed distance" distributions and recomputes the distances ///respecting as accurately as possible the position of the zero of the original distributions ///@param rModelPart is the ModelPart on which we will operate ///@param rDistanceVar is the Variable that we will use in calculating the distance ///@param rAreaVar is the Variable that we will use for L2 projections ///@param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances ///@param max_distance distances will not be computed after reaching this limit void CalculateInterfacePreservingDistances(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, const unsigned int max_levels, const double max_distance) { KRATOS_TRY Check(rModelPart, rDistanceVar, rAreaVar); ResetVariables(rModelPart,rDistanceVar, max_distance); AbsDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance); ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance); AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance); KRATOS_CATCH("") } /// A simplified version of CalculateDistances to be used when the rDistanceVar == 0 surface is described by a set of nodes /** * @param rModelPart is the ModelPart on which we will operate * @param rDistanceVar is the Variable that we will use in calculating the distance * @param rAreaVar is the Variable that we will use for L2 projections * @param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances * @param max_distance distances will not be computed after reaching this limit * @see ParallelDistanceCalculator::CalculateDistances */ void CalculateDistancesLagrangianSurface(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, const unsigned int max_levels, const double max_distance) { KRATOS_TRY bool is_distributed = 
false; if(rModelPart.GetCommunicator().TotalProcesses() > 1) is_distributed = true; array_1d<double,TDim+1> visited; const int elem_size = rModelPart.Elements().size(); const int node_size = rModelPart.Nodes().size(); //check that variables needed are in the model part if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) ) KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part",""); if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) ) KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part",""); if(is_distributed == true) if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) ) KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part",""); // set to zero the distance #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& area = it->FastGetSolutionStepValue(rAreaVar); area = 0.0; double& is_visited = it->GetValue(IS_VISITED); double& distance = it->FastGetSolutionStepValue(rDistanceVar); it->GetValue(rDistanceVar) = it->FastGetSolutionStepValue(rDistanceVar); if(is_visited != 1.0) { distance = 0.0; } else area = 1.0; // else if(dist < 0.0) // KRATOS_THROW_ERROR(std::logic_error,"ATTENTION: prescribed distance function set to a number smaller than 0!!",""); } array_1d<double,TDim+1> N; BoundedMatrix <double, TDim+1,TDim> DN_DX; // Extend the distances layer by layer up to a maximum level of layers for(unsigned int level=0; level<max_levels; level++) { //loop on active elements and advance the distance computation #pragma omp parallel for private(DN_DX,visited) for(int i = 0; i<elem_size; i++) { PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i; Geometry<Node<3> >&geom = it->GetGeometry(); for(unsigned int j=0; j<TDim+1; j++) visited[j] = (static_cast<const Node<3> & >(geom[j])).GetValue(IS_VISITED); if(IsActive(visited)) 
{ double Volume; GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume); AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume); } } //mpi sync variables if(is_distributed == true) { #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; if(it->GetValue(IS_VISITED) == 1.0) { double& distance = it->FastGetSolutionStepValue(rDistanceVar); it->GetValue(rDistanceVar) = distance; distance = 0.0; } else it->GetValue(rDistanceVar) = 0.0; } rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar); rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar); #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar); } rModelPart.GetCommunicator().GetDataCommunicator().Barrier(); } //finalize the computation of the distance #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& area = it->FastGetSolutionStepValue(rAreaVar); double& is_visited = it->GetValue(IS_VISITED); if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and not before { double& distance = it->FastGetSolutionStepValue(rDistanceVar); distance /= area; is_visited = 1.0; } } } //*****************************************************************+ //*****************************************************************+ //*****************************************************************+ //assign the sign to the distance function according to the original distribution. 
Set to max for nodes that were not calculated #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; const double area = it->FastGetSolutionStepValue(rAreaVar); double& dist = it->FastGetSolutionStepValue(rDistanceVar); if(dist > max_distance || area <1e-20) dist = max_distance; // if(it->GetValue(IS_FLUID) == 1.0) // dist = -fabs(dist); // else // dist = fabs(dist); } KRATOS_CATCH("") } //********************************************************************************** //********************************************************************************** double FindMaximumEdgeSize(ModelPart& r_model_part) { KRATOS_TRY double h_max = 0.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double h = 0.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > h) h = l; } } h = sqrt(h); if(h > h_max) h_max = h; } h_max = r_model_part.GetCommunicator().GetDataCommunicator().MaxAll(h_max); return h_max; KRATOS_CATCH(""); } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "ParallelDistanceCalculator" << TDim << "D"; return buffer.str(); }; /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "ParallelDistanceCalculator" << TDim << "D"; }; /// Print object's data. 
    virtual void PrintData(std::ostream& rOStream) const {};

    ///@}
    ///@name Friends
    ///@{
    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{
    ///@}
    ///@name Protected member Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{

    //*******************************************************************
    /// True when the element has nodal distances of both signs (the zero surface crosses it).
    bool IsDivided(array_1d<double,TDim+1>& dist)
    {
        unsigned int positive = 0;
        unsigned int negative = 0;
        for(unsigned int i=0; i<TDim+1; i++)
        {
            if(dist[i] >= 0)
                positive++;
            else
                negative++;
        }

        bool is_divided = false;
        if(positive > 0 && negative>0)
            is_divided = true;

        return is_divided;
    }

    //*******************************************************************
    /// True when exactly TDim of the TDim+1 nodes are already known,
    /// i.e. the element can be used to compute the one remaining unknown node.
    bool IsActive(array_1d<double,TDim+1>& visited)
    {
        unsigned int positive = 0;
        for(unsigned int i=0; i<TDim+1; i++)
            if(visited[i] > 0.9999999999) //node was considered
                positive++;

        bool is_active = false;
        if(positive == TDim)
            is_active = true;

        return is_active;
    }

    //*******************************************************************
    /// Computes, for every node of a divided element, the (unsigned) distance
    /// to the elemental free surface, using the distance gradient as normal.
    void ComputeExactDistances(const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                               const double& Area,
                               Geometry<Node<3> >& geom,
                               const array_1d<double,TDim+1>& distances,
                               array_1d<double,TDim+1>& exact_dist
                              )
    {
        array_1d<double,TDim> grad_d;
        array_1d<double,3> coord_on_0 = ZeroVector(3);
        array_1d<double,3> temp;

        //compute the gradient of the distance and normalize it
        noalias(grad_d) = prod(trans(DN_DX),distances);
        double norm = norm_2(grad_d);
        grad_d /= norm;

        //find one division point on one edge
        for(unsigned int i = 1; i<TDim+1; i++)
        {
            if(distances[0]*distances[i]<=0.0) //if the edge is divided
            {
                double delta_d = fabs(distances[i]) + fabs(distances[0]);

                if(delta_d>1e-20)
                {
                    // interpolate the zero crossing along the edge 0-i
                    double Ni = fabs(distances[0]) / delta_d;
                    double N0 = fabs(distances[i]) / delta_d;

                    noalias(coord_on_0) = N0 * geom[0].Coordinates();
                    noalias(coord_on_0) += Ni * geom[i].Coordinates();
                }
                else
                    noalias(coord_on_0) = geom[0].Coordinates();

                break;
            }
        }

        //now calculate the distance of all the nodes from the elemental free surface
        for(unsigned int i = 0; i<TDim+1; i++)
        {
            noalias(temp) = geom[i].Coordinates();
            noalias(temp) -= coord_on_0 ;

            // projection onto the unit normal gives the point-plane distance
            double real_distance = 0.0;
            for(unsigned int k=0; k<TDim; k++)
                real_distance += temp[k]*grad_d[k];
            real_distance = fabs(real_distance);

            exact_dist[i] = real_distance;
        }
    }

    //*******************************************************************
    /// Alternative candidate-distance computation: fits a plane through the
    /// known nodes and extrapolates the distance of the unknown node.
    /// NOTE(review): assumes at least one node is marked IS_VISITED == 1,
    /// otherwise reference_node_index stays uninitialized — callers guard this via IsActive.
    void AddDistanceToNodesNew(const Variable<double>& rDistanceVar,
                               const Variable<double>& rAreaVar,
                               Geometry<Node<3> >& geom,
                               const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                               const double& Volume
                              )
    {
        unsigned int unknown_node_index = 0;
        array_1d<double,TDim> d;
        double nodal_vol = Volume/static_cast<double>(TDim+1);
        double avg_dist = 0.0;
        Matrix coord_a(3,3);
        int row = 0;
        int reference_node_index;

        //compute discriminant and find the index of the unknown node
        noalias(d) = ZeroVector(TDim);
        for (unsigned int iii = 0; iii < TDim + 1; iii++)
        {
            double node_is_known = geom[iii].GetValue(IS_VISITED);
            if (node_is_known == 1) //identifying the known node
            {
                reference_node_index = iii;
                for(int i_coord = 0 ; i_coord < 3 ; i_coord++)
                    coord_a(row,i_coord) = geom[iii].Coordinates()[i_coord];
                d[row] = geom[iii].FastGetSolutionStepValue(rDistanceVar);
                avg_dist += d[row];
                row++;
            }
            else
                unknown_node_index = iii;
        }
        avg_dist /= static_cast<double>(TDim);

        Matrix inverse_a(3,3);
        double det_a;
        MathUtils<double>::InvertMatrix3(coord_a,inverse_a,det_a);

        array_1d<double,TDim> x; // normal to the surface
        noalias(x) = prod(inverse_a,d);
        double norm_x = norm_2(x);
        x /= norm_x;

        array_1d<double,TDim> v = geom[unknown_node_index].Coordinates() - geom[reference_node_index].Coordinates();

        double distance = inner_prod(x,v);
        distance += geom[reference_node_index].FastGetSolutionStepValue(rDistanceVar);

        //KRATOS_WATCH(coord_a)
        //KRATOS_WATCH(distance)

        // lock-protected accumulation: multiple elements may update the same node
        geom[unknown_node_index].SetLock();
        geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
        geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
        geom[unknown_node_index].UnSetLock();

        //GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);
    }

    //*******************************************************************
    /// Computes a candidate distance for the single unknown node of an active
    /// element by requiring |grad(d)| = 1 on the element (eikonal condition)
    /// and accumulates it (volume-weighted, lock-protected) on that node.
    void AddDistanceToNodes(const Variable<double>& rDistanceVar,
                            const Variable<double>& rAreaVar,
                            Geometry<Node<3> >& geom,
                            const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                            const double& Volume
                           )
    {
        unsigned int unknown_node_index = 0;
        array_1d<double,TDim> d;
        double nodal_vol = Volume/static_cast<double>(TDim+1);
        double avg_dist = 0.0;

        //compute discriminant and find the index of the unknown node
        noalias(d) = ZeroVector(TDim);
        for (unsigned int iii = 0; iii < TDim + 1; iii++)
        {
            double node_is_known = geom[iii].GetValue(IS_VISITED);
            if (node_is_known == 1) //identifying the known node
            {
                const double distance = geom[iii].FastGetSolutionStepValue(rDistanceVar);
                avg_dist += distance;
                for (unsigned int jjj = 0; jjj < TDim; jjj++)
                    d[jjj] += DN_DX(iii, jjj) * distance;
            }
            else
                unknown_node_index = iii;
        }
        avg_dist /= static_cast<double>(TDim);

        //finalizing computation of discriminant
        double c = -1.0;
        double a = 0.0;
        double b = 0.0;
        for (unsigned int jjj = 0; jjj < TDim; jjj++)
        {
            a += DN_DX(unknown_node_index, jjj) * DN_DX(unknown_node_index, jjj);
            b += d[jjj] * DN_DX(unknown_node_index, jjj);
            c += d[jjj] * d[jjj];
        }
        b *= 2.0;

        //here we require (a*x^2 + b*x + c)^2 to be minimum (x represents the unknown distance)
        //this implies setting to zero
        //(a*x^2 + b*x + c)*(2ax+b) = 0
        double distance;
        double discriminant = b * b - 4.0 * a*c;

        if (discriminant < 0.0) //here we solve (2ax+b) = 0
        {
//                 double numerator = 0.0;
//                 double denominator = 0.0;
//                 for(unsigned int i=0; i<TDim+1; i++)
//                 {
//                     for (unsigned int jjj = 0; jjj < TDim; jjj++)
//                     {
//                         if(i != unknown_node_index)
//                             numerator += DN_DX(unknown_node_index, jjj) * DN_DX(i, jjj);
//                         else
//                             denominator += DN_DX(unknown_node_index, jjj)*DN_DX(unknown_node_index, jjj);
//                     }
//                 }
//                 distance = - numerator/denominator;
//
//                 KRATOS_WATCH(geom[unknown_node_index].Id());
//                 KRATOS_WATCH(discriminant);
            // NOTE(review): if a == 0.0 this divides by zero; the else-branch
            // guards a == 0 but this one does not — confirm a > 0 is guaranteed here
            distance = -b / (2.0*a); //avg_dist ; //
        }
        else //in this case we solve (a*x^2 + b*x + c)=0
        {
            //(accurate) computation of the distance
            //requires the solution of a*x^2+b*x+c=0
            double q, root1, root2;
            double sqrt_det = sqrt(discriminant);

            if (a != 0.0)
            {
                // numerically stable quadratic formula (avoids cancellation)
                if (b > 0) q = -0.5 * (b + sqrt_det);
                else q = -0.5 * (b - sqrt_det);
                root1 = q / a;
                root2 = c / q;
                if (root1 > root2) distance = root1;
                else distance = root2;
            }
            else //in this case we have a linear equation
            {
                distance = -c / b;
            }
        }

        if(distance < 0.0)
            distance = 1e-15;

        // lock-protected accumulation: multiple elements may update the same node
        geom[unknown_node_index].SetLock();
        geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
        geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
        geom[unknown_node_index].UnSetLock();
    }

    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected  Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}

private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{

    /// Verifies that the required nodal variables exist in the model part
    /// (and PARTITION_INDEX when running distributed); throws otherwise.
    void Check(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar)
    {
        KRATOS_TRY

        bool is_distributed = false;
        if(rModelPart.GetCommunicator().TotalProcesses() > 1)
            is_distributed = true;

        const int node_size = rModelPart.Nodes().size();

        //check that variables needed are in the model part
        if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part","");
        if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) )
            KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part","");
        if(is_distributed == true)
            if(node_size && !(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) )
                KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part","")

        KRATOS_CATCH("")
    }

    /// Backs up the current distance to the non-historical database, records the
    /// original sign in IS_FLUID, and resets all distances to MaxDistance.
    void ResetVariables(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const double MaxDistance)
    {
        KRATOS_TRY

        //reset the variables needed
        const int node_size = rModelPart.Nodes().size();

        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            //it->FastGetSolutionStepValue(rAreaVar) = 0.0;
            double& dist = it->FastGetSolutionStepValue(rDistanceVar);
            it->SetValue(rDistanceVar,dist); //here we copy the distance function to the fixed database
            if(dist < 0.0)
                it->SetValue(IS_FLUID,1.0);
            else
                it->SetValue(IS_FLUID,0.0);

            dist = MaxDistance;

            it->SetValue(IS_VISITED,0);
        }

        KRATOS_CATCH("")
    }

    /// Seeds the computation: on every element cut by the original zero surface,
    /// recomputes "exact" nodal distances and marks those nodes as visited.
    void CalculateExactDistancesOnDividedElements(ModelPart& rModelPart,
            const Variable<double>& rDistanceVar,
            const Variable<double>& rAreaVar,
            const double MaxDistance,
            Flags Options)
    {
        KRATOS_TRY

        //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
        //attempting to mantain the original position of the free surface
        //note that the backup value is used in calculating the position of the free surface and the divided elements
        array_1d<double,TDim+1> dist, exact_dist;
        array_1d<double,TDim+1> visited;
//        double lumping_factor = 1.0/double(TDim+1);
        int elem_size = rModelPart.Elements().size();

        #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
        for (int i = 0; i < elem_size; i++)
        {
            PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
            Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

            for (unsigned int j = 0; j < TDim + 1; j++)
                dist[j] = element_geometry[j].GetValue(rDistanceVar);

            bool is_divided = IsDivided(dist);

            if (is_divided == true)
            {
                if (Options.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE))
                    GeometryUtils::CalculateExactDistancesToPlane(element_geometry, dist);
                else
                    GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);

                // loop over nodes and apply the new distances.
                for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++)
                {
                    double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar);
                    double new_distance = dist[i_node];

                    // keep the smallest magnitude among contributing elements
                    element_geometry[i_node].SetLock();
                    if (fabs(distance) > fabs(new_distance))
                        distance = new_distance;
                    element_geometry[i_node].GetValue(IS_VISITED) = 1;
                    element_geometry[i_node].UnSetLock();
                }
            }
        }

        //mpi sync variables
        rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED);
        rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
        rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar);

        const int node_size = rModelPart.Nodes().size();
        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar);
            double& is_visited = it->GetValue(IS_VISITED);
            if(is_visited == 0.00)
            {
                nodal_dist = 0.00;
                it->GetSolutionStepValue(rAreaVar) = 0.00;
            }
            else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync
            {
                is_visited = 1.00;
                it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct
            }
        }

        KRATOS_CATCH("")
    }

    /// Interface-preserving variant of the seeding step: divided elements keep
    /// |current distance| instead of a recomputed exact distance.
    void AbsDistancesOnDividedElements(ModelPart& rModelPart,
                                       const Variable<double>& rDistanceVar,
                                       const Variable<double>& rAreaVar,
                                       const double MaxDistance)
    {
        KRATOS_TRY

        //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
        //attempting to mantain the original position of the free surface
        //note that the backup value is used in calculating the position of the free surface and the divided elements
        array_1d<double,TDim+1> dist, exact_dist;
        array_1d<double,TDim+1> visited;
        int elem_size = rModelPart.Elements().size();

        #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
        for (int i = 0; i < elem_size; i++)
        {
            PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
            Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

            for (unsigned int j = 0; j < TDim + 1; j++)
                dist[j] = element_geometry[j].GetValue(rDistanceVar);

            bool is_divided = IsDivided(dist);

            if (is_divided == true)
            {
                // loop over nodes and apply the new distances.
                for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++)
                {
                    double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar);
                    double new_distance = dist[i_node];

                    element_geometry[i_node].SetLock();
                    distance = fabs(new_distance);
                    element_geometry[i_node].GetValue(IS_VISITED) = 1;
                    element_geometry[i_node].UnSetLock();
                }
            }
        }

        //mpi sync variables
        rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED);
        rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
        rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar);

        const int node_size = rModelPart.Nodes().size();
        #pragma omp parallel for
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar);
            double& is_visited = it->GetValue(IS_VISITED);
            if(is_visited == 0.00)
            {
                nodal_dist = 0.00;
                it->GetSolutionStepValue(rAreaVar) = 0.00;
            }
            else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync
            {
                is_visited = 1.00;
                it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct
            }
        }

        KRATOS_CATCH("")
    }

    /// Propagates the (unsigned) distances outward from the seeded nodes,
    /// one layer of elements per level, up to max_levels.
    void ExtendDistancesByLayer(ModelPart& rModelPart,
            const Variable<double>& rDistanceVar,
            const Variable<double>& rAreaVar,
            const unsigned int max_levels,
            const double MaxDistance)
    {
        KRATOS_TRY

        array_1d<double,TDim+1> visited;
        array_1d<double,TDim+1> N;
        BoundedMatrix <double, TDim+1,TDim> DN_DX;
        const int elem_size = rModelPart.Elements().size();
        const int node_size = rModelPart.Nodes().size();

        //*****************************************************************+
        //*****************************************************************+
//*****************************************************************+ //now extend the distances layer by layer up to a maximum level of layers for(unsigned int level=0; level<max_levels; level++) { //loop on active elements and advance the distance computation #pragma omp parallel for private(DN_DX,visited) for(int i = 0; i<elem_size; i++) { PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i; Geometry<Node<3> >&geom = it->GetGeometry(); for(unsigned int j=0; j<TDim+1; j++) visited[j] = geom[j].GetValue(IS_VISITED); if(IsActive(visited)) { double Volume; GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume); AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume); } } bool is_distributed = false; if(rModelPart.GetCommunicator().TotalProcesses() > 1) is_distributed = true; //mpi sync variables if(is_distributed == true) { #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; if(it->GetValue(IS_VISITED) == 1.0) { double& distance = it->FastGetSolutionStepValue(rDistanceVar); it->GetValue(rDistanceVar) = distance; distance = 0.0; } else it->GetValue(rDistanceVar) = 0.0; } rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar); rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar); #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar); } rModelPart.GetCommunicator().GetDataCommunicator().Barrier(); } //finalize the computation of the distance #pragma omp parallel for private(DN_DX) for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; double& area = it->FastGetSolutionStepValue(rAreaVar); double& is_visited = it->GetValue(IS_VISITED); if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and 
not before { double& distance = it->FastGetSolutionStepValue(rDistanceVar); distance /= area; is_visited = 1.0; } } } KRATOS_CATCH("") } void AssignDistanceSign(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar, const double MaxDistance) { KRATOS_TRY //*****************************************************************+ //*****************************************************************+ //*****************************************************************+ //assign the sign to the distance function according to the original distribution. Set to max for nodes that were not calculated const int node_size = rModelPart.Nodes().size(); #pragma omp parallel for for(int i = 0; i<node_size; i++) { ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i; const double area = it->FastGetSolutionStepValue(rAreaVar); double& dist = it->FastGetSolutionStepValue(rDistanceVar); if(dist < 0.0) KRATOS_THROW_ERROR(std::logic_error,"IMPOSSIBLE negative distance found !!",""); if(dist > MaxDistance || area <1e-20) //if(dist > max_distance) dist = MaxDistance; if(it->GetValue(IS_FLUID) == 1.0) dist = -fabs(dist); else dist = fabs(dist); } KRATOS_CATCH("") } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. ParallelDistanceCalculator<TDim>& operator=(ParallelDistanceCalculator<TDim> const& rOther) {}; /// Copy constructor. 
ParallelDistanceCalculator(ParallelDistanceCalculator<TDim> const& rOther) {}; ///@} }; // Class ParallelDistanceCalculator ///@} ///@name Type Definitions ///@{ template< unsigned int TDim> const Kratos::Flags ParallelDistanceCalculator<TDim>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(0)); ///@} ///@name Input and output ///@{ /// input stream function template<unsigned int TDim> inline std::istream& operator >> (std::istream& rIStream, ParallelDistanceCalculator<TDim>& rThis) { return rIStream; } /// output stream function template<unsigned int TDim> inline std::ostream& operator << (std::ostream& rOStream, const ParallelDistanceCalculator<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED defined
/* ===================== file: perftest.c ===================== */
/**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014.  ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
*               of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "libperf.h"
#include "libperf_int.h"

#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
#  include <mpi.h>
#elif HAVE_RTE
#  include<rte.h>
#endif

#define MAX_BATCH_FILES  32

#define TL_RESOURCE_NAME_NONE "<none>"
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"

/* Flags controlling what gets printed and how (stored in perftest_context.flags) */
enum {
    TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),
    TEST_FLAG_PRINT_TEST    = UCS_BIT(1),
    TEST_FLAG_SET_AFFINITY  = UCS_BIT(8),
    TEST_FLAG_NUMERIC_FMT   = UCS_BIT(9),
    TEST_FLAG_PRINT_FINAL   = UCS_BIT(10),
    TEST_FLAG_PRINT_CSV     = UCS_BIT(11)
};

/* State of the socket-based runtime-environment group (server/client pair) */
typedef struct sock_rte_group {
    int                          is_server;
    int                          connfd;
} sock_rte_group_t;

/* Descriptor of one selectable test: maps a CLI name to API/command/type */
typedef struct test_type {
    const char                   *name;
    ucx_perf_api_t               api;
    ucx_perf_cmd_t               command;
    ucx_perf_test_type_t         test_type;
    const char                   *desc;
} test_type_t;

/* All command-line state of a perftest run */
struct perftest_context {
    ucx_perf_params_t            params;
    const char                   *server_addr;
    int                          port;
    int                          mpi;
    unsigned                     cpu;
    unsigned                     flags;

    unsigned                     num_batch_files;
    char                         *batch_files[MAX_BATCH_FILES];
    char                         *test_names[MAX_BATCH_FILES];

    sock_rte_group_t             sock_rte_group;
};

/* Registry of all runnable tests; iterated until the NULL sentinel */
test_type_t tests[] = {
    {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "active message latency"},

    {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
     "atomic add latency"},

    {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / rate"},

    {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / rate"},

    {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / rate"},

    {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "active message bandwidth / message rate"},

    {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth / message rate"},

    {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add message rate"},

    {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag match latency"},

    {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag match bandwidth"},

    {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag sync match latency"},

    {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag sync match bandwidth"},

    {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth"},

    {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add bandwidth / message rate"},

    {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / bandwidth / rate"},

    {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / bandwidth / rate"},

    {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / bandwidth / rate"},

    {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "stream bandwidth"},

    {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
     "stream latency"},

    {NULL}
};

/*
 * Transfer exactly 'size' bytes over 'sock' using 'sock_call' (send or recv),
 * polling with a 1 ms timeout and invoking 'progress(arg)' between attempts
 * so the perf test can keep advancing. Returns 0 on success, -1 on error.
 */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            ret = sock_call(sock, (char*)data + total, size - total, 0);
            if (ret < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            }
            total += ret;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}

/* Blocking send of 'size' bytes with progress callback; 0 on success. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, (void*)send, POLLOUT, data, size, progress, arg, "send");
}

/* Blocking receive of 'size' bytes with progress callback; 0 on success. */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}

/*
 * Print one result row (CSV, numeric or plain format depending on flags).
 * Nothing is printed when results are suppressed, or when only the final
 * row was requested and this is an intermediate one.
 */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final)
{
    static const char *fmt_csv = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
    static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
    static const char *fmt_plain = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
    unsigned i;

    if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
        (!final && (flags & TEST_FLAG_PRINT_FINAL)))
    {
        return;
    }

    if (flags & TEST_FLAG_PRINT_CSV) {
        for (i = 0; i < num_names; ++i) {
            printf("%s,", test_names[i]);
        }
    }

    /* latencies reported in usec, bandwidth in MB/s */
    printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv :
           (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain,
           (double)result->iters,
           result->latency.typical * 1000000.0,
           result->latency.moment_average * 1000000.0,
           result->latency.total_average * 1000000.0,
           result->bandwidth.moment_average / (1024.0 * 1024.0),
           result->bandwidth.total_average / (1024.0 * 1024.0),
           result->msgrate.moment_average,
           result->msgrate.total_average);
    fflush(stdout);
}

/* Print the banner describing the selected test and the results table header. */
static void print_header(struct perftest_context *ctx)
{
    const char *test_api_str;
    const char *test_data_str;
    test_type_t *test;
    unsigned i;

    if (ctx->flags & TEST_FLAG_PRINT_TEST) {
        /* find the test entry matching the configured command/type */
        for (test = tests; test->name; ++test) {
            if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
                break;
            }
        }
        if (test->name != NULL) {
            if (test->api == UCX_PERF_API_UCT) {
                test_api_str = "transport layer";
                switch (ctx->params.uct.data_layout) {
                case UCT_PERF_DATA_LAYOUT_SHORT:
                    test_data_str = "short";
                    break;
                case UCT_PERF_DATA_LAYOUT_BCOPY:
                    test_data_str = "bcopy";
                    break;
                case UCT_PERF_DATA_LAYOUT_ZCOPY:
                    test_data_str = "zcopy";
                    break;
                default:
                    test_data_str = "(undefined)";
                    break;
                }
            } else if (test->api == UCX_PERF_API_UCP) {
                test_api_str = "protocol layer";
                test_data_str = "(automatic)"; /* TODO contig/stride/stream */
            } else {
                return;
            }

            printf("+------------------------------------------------------------------------------------------+\n");
            printf("| API: %-60s |\n", test_api_str);
            printf("| Test: %-60s |\n", test->desc);
            printf("| Data layout: %-60s |\n", test_data_str);
            printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params));
        }
    }

    if (ctx->flags & TEST_FLAG_PRINT_CSV) {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            for (i = 0; i < ctx->num_batch_files; ++i) {
                printf("%s,", basename(ctx->batch_files[i]));
            }
            printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
        }
    } else {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
            printf("| | latency (usec) | bandwidth (MB/s) | message rate (msg/s) |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
            printf("| # iterations | typical | average | overall | average | overall | average | overall |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
        } else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
            printf("+------------------------------------------------------------------------------------------+\n");
        }
    }
}

/* Print the batch-test-name separator row before a test run (plain-table
 * output only; CSV mode prints names inline with each row instead). */
static void print_test_name(struct perftest_context *ctx)
{
    char buf[200];
    unsigned i, pos;

    if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
        strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");

        /* Overlay "name1/name2/..." onto the separator line, bounded by buf */
        pos = 1;
        for (i = 0; i < ctx->num_batch_files; ++i) {
            if (i != 0) {
                buf[pos++] = '/';
            }
            memcpy(&buf[pos], ctx->test_names[i],
                   ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1));
            pos += strlen(ctx->test_names[i]);
        }

        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("%s\n", buf);
        }
    }
}

/* Print command-line help. Under MPI, only rank 0 prints to avoid duplicated
 * output. Defaults shown in the text come from the already-initialized ctx. */
static void usage(const struct perftest_context *ctx, const char *program)
{
    static const char* api_names[] = {
        [UCX_PERF_API_UCT] = "UCT",
        [UCX_PERF_API_UCP] = "UCP"
    };
    test_type_t *test;
    int UCS_V_UNUSED rank;

#if HAVE_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ctx->mpi && (rank != 0)) {
        return;
    }
#endif

#if HAVE_MPI
    printf(" Note: test can be also launched as an MPI application\n");
    printf("\n");
#elif HAVE_RTE
    printf(" Note: this test can be also launched as an libRTE application\n");
    printf("\n");
#endif
    printf(" Usage: %s [ server-hostname ] [ options ]\n", program);
    printf("\n");
    printf(" Common options:\n");
    printf(" -t <test> test to run:\n");
    for (test = tests; test->name; ++test) {
        printf(" %13s - %s %s\n", test->name, api_names[test->api],
               test->desc);
    }
    printf("\n");
    printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n",
           ctx->params.msg_size_list[0]);
    printf(" for example: \"-s 16,48,8192,8192,14\"\n");
    printf(" -n <iters> number of iterations to run (%ld)\n", ctx->params.max_iter);
    printf(" -w <iters> number of warm-up iterations (%zu)\n",
           ctx->params.warmup_iter);
    printf(" -c <cpu> set affinity to this CPU (off)\n");
    printf(" -O <count> maximal number of uncompleted outstanding sends (%u)\n",
           ctx->params.max_outstanding);
    printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n",
           ctx->params.iov_stride);
    printf(" -T <threads> number of threads in the test (%d), if >1 implies \"-M multi\"\n",
           ctx->params.thread_count);
    printf(" -B register memory with NONBLOCK flag\n");
    printf(" -b <file> read and execute tests from a batch file: every line in the\n");
    printf(" file is a test to run, first word is test name, the rest of\n");
    printf(" the line is command-line arguments for the test.\n");
    printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
    printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
    printf(" -m <mem type> memory type of messages\n");
    printf(" host - system memory(default)\n");
#if HAVE_CUDA
    printf(" cuda - NVIDIA GPU memory\n");
    printf(" cuda-managed - NVIDIA cuda managed/unified memory\n");
#endif
    printf(" -h show this help message\n");
    printf("\n");
    printf(" Output format:\n");
    printf(" -N use numeric formatting (thousands separator)\n");
    printf(" -f print only final numbers\n");
    printf(" -v print CSV-formatted output\n");
    printf("\n");
    printf(" UCT only:\n");
    printf(" -d <device> device to use for testing\n");
    printf(" -x <tl> transport to use for testing\n");
    printf(" -D <layout> data layout for sender side:\n");
    printf(" short - short messages (default, cannot be used for get)\n");
    printf(" bcopy - copy-out (cannot be used for atomics)\n");
    printf(" zcopy - zero-copy (cannot be used for atomics)\n");
    printf(" iov - scatter-gather list (iovec)\n");
    printf(" -W <count> flow control window size, for active messages (%u)\n",
           ctx->params.uct.fc_window);
    printf(" -H <size> active message header size (%zu)\n",
           ctx->params.am_hdr_size);
    printf(" -A <mode> asynchronous progress mode (thread)\n");
    printf(" thread - separate progress thread\n");
    printf(" signal - signal-based timer\n");
    printf("\n");
    printf(" UCP only:\n");
    printf(" -M <thread> thread support level for progress engine (single)\n");
    printf(" single - only the master thread can access\n");
    printf(" serialized - one thread can access at a time\n");
    printf(" multi - multiple threads can access\n");
    printf(" -D <layout>[,<layout>]\n");
    printf(" data layout for sender and receiver side (contig)\n");
    printf(" contig - Continuous datatype\n");
    printf(" iov - Scatter-gather list\n");
    printf(" -C use wild-card tag for tag tests\n");
    printf(" -U force unexpected flow by using tag probe\n");
    printf(" -r <mode> receive mode for stream tests (recv)\n");
    printf(" recv : Use ucp_stream_recv_nb\n");
    printf(" recv_data : Use ucp_stream_recv_data_nb\n");
    printf("\n");
    printf(" NOTE: When running UCP tests, transport and device should be specified by\n");
    printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
    printf("\n");
}

/* Return the file-name component of 'path' (text after the last '/').
 * Named with a double underscore to avoid clashing with libgen basename();
 * NOTE(review): identifiers starting with "__" are reserved for the
 * implementation - consider renaming. */
static const char *__basename(const char *path)
{
    const char *p = strrchr(path, '/');

    return (p == NULL) ?
path : (p + 1); } static ucs_status_t parse_ucp_datatype_params(const char *optarg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(optarg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(optarg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_message_sizes_params(const char *optarg, ucx_perf_params_t *params) { char *optarg_ptr, *optarg_ptr2; size_t token_num, token_it; const char delim = ','; optarg_ptr = (char *)optarg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; params->msg_size_list = realloc(params->msg_size_list, sizeof(*params->msg_size_list) * token_num); if (NULL == params->msg_size_list) { return UCS_ERR_NO_MEMORY; } optarg_ptr = (char *)optarg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static void init_test_params(ucx_perf_params_t *params) { memset(params, 0, sizeof(*params)); params->api = UCX_PERF_API_LAST; params->command = UCX_PERF_CMD_LAST; params->test_type = UCX_PERF_TEST_TYPE_LAST; params->thread_mode = UCS_THREAD_MODE_SINGLE; params->thread_count = 1; params->async_mode = UCS_ASYNC_MODE_THREAD; 
params->wait_mode = UCX_PERF_WAIT_MODE_LAST; params->max_outstanding = 1; params->warmup_iter = 10000; params->am_hdr_size = 8; params->alignment = ucs_get_page_size(); params->max_iter = 1000000l; params->max_time = 0.0; params->report_interval = 1.0; params->flags = UCX_PERF_TEST_FLAG_VERBOSE; params->uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->mem_type = UCT_MD_MEM_TYPE_HOST; params->msg_size_cnt = 1; params->iov_stride = 0; params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->uct.tl_name, TL_RESOURCE_NAME_NONE); params->msg_size_list = malloc(sizeof(*params->msg_size_list) * params->msg_size_cnt); params->msg_size_list[0] = 8; } static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg) { test_type_t *test; char *optarg2 = NULL; switch (opt) { case 'd': ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name), "%s", optarg); return UCS_OK; case 'x': ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name), "%s", optarg); return UCS_OK; case 't': for (test = tests; test->name; ++test) { if (!strcmp(optarg, test->name)) { params->api = test->api; params->command = test->command; params->test_type = test->test_type; break; } } if (test->name == NULL) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(optarg, "short")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(optarg, "bcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(optarg, "zcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(optarg, &params->ucp.send_datatype)) { optarg2 = strchr(optarg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, 
&params->ucp.recv_datatype)) { return -1; } } } else { ucs_error("Invalid option argument for -D"); return -1; } return UCS_OK; case 'i': params->iov_stride = atol(optarg); return UCS_OK; case 'n': params->max_iter = atol(optarg); return UCS_OK; case 's': return parse_message_sizes_params(optarg, params); case 'H': params->am_hdr_size = atol(optarg); return UCS_OK; case 'W': params->uct.fc_window = atoi(optarg); return UCS_OK; case 'O': params->max_outstanding = atoi(optarg); return UCS_OK; case 'w': params->warmup_iter = atol(optarg); return UCS_OK; case 'o': params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'M': if (!strcmp(optarg, "single")) { params->thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(optarg, "serialized")) { params->thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(optarg, "multi")) { params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->thread_count = atoi(optarg); params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; case 'A': if (!strcmp(optarg, "thread")) { params->async_mode = UCS_ASYNC_MODE_THREAD; return UCS_OK; } else if (!strcmp(optarg, "signal")) { params->async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(optarg, "recv_data")) { params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(optarg, "recv")) { params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if 
           (!strcmp(optarg, "host")) {
            params->mem_type = UCT_MD_MEM_TYPE_HOST;
            return UCS_OK;
        } else if(!strncmp(optarg, "cuda", 4)) {
#if HAVE_CUDA
            /* "cuda-managed" selects unified memory, plain "cuda" device memory */
            params->mem_type = (!strcmp(optarg, "cuda-managed")) ?
                               UCT_MD_MEM_TYPE_CUDA_MANAGED : UCT_MD_MEM_TYPE_CUDA;
            return UCS_OK;
#else
            ucs_error("not built with cuda support");
            return UCS_ERR_INVALID_PARAM;
#endif
        }
        return UCS_ERR_INVALID_PARAM;
    default:
        return UCS_ERR_INVALID_PARAM;
    }
}

/* Read the next non-empty, non-comment ('#') line of a batch file, tokenize
 * it like a command line, and apply its options to 'params'. The first word
 * is the test name, returned in *test_name_p (strdup'ed, caller frees).
 * Returns UCS_ERR_NO_ELEM at end of file. */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, ucx_perf_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM;
        }
        ++(*line_num);

        /* Split the line into an argv[] vector for getopt() */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#'));

    /* Re-run getopt() from the start over this synthetic argv */
    optind = 1;
    while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("in batch file '%s' line %d: -%c %s: %s",
                      file_name, *line_num, c, optarg,
                      ucs_status_string(status));
            return status;
        }
    }

    *test_name_p = strdup(argv[0]);

    return UCS_OK;
}

/* Parse the full command line into 'ctx' (framework options here, per-test
 * options delegated to parse_test_params). A trailing non-option argument is
 * taken as the server hostname. Returns UCS_ERR_CANCELED after printing help. */
static ucs_status_t parse_opts(struct perftest_context *ctx,
                               int mpi_initialized, int argc, char **argv)
{
    ucs_status_t status;
    int c;

    ucs_trace_func("");

    init_test_params(&ctx->params);
    ctx->server_addr = NULL;
    ctx->num_batch_files = 0;
    ctx->port = 13337;
    ctx->flags = 0;
    ctx->mpi = mpi_initialized;

    optind = 1;
    while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
        switch (c) {
        case 'p':
            ctx->port = atoi(optarg);
            break;
        case 'b':
            if (ctx->num_batch_files < MAX_BATCH_FILES) {
                ctx->batch_files[ctx->num_batch_files++] = optarg;
            }
            break;
        case 'N':
            ctx->flags |= TEST_FLAG_NUMERIC_FMT;
            break;
        case 'f':
            ctx->flags |= TEST_FLAG_PRINT_FINAL;
            break;
        case 'v':
            ctx->flags |= TEST_FLAG_PRINT_CSV;
            break;
        case 'c':
            ctx->flags |= TEST_FLAG_SET_AFFINITY;
            ctx->cpu = atoi(optarg);
            break;
        case 'P':
#if HAVE_MPI
            ctx->mpi = atoi(optarg) && mpi_initialized;
            break;
#endif
            /* NOTE(review): when built without MPI, -P falls through to -h
             * (prints usage and cancels) - confirm this is intended */
        case 'h':
            usage(ctx, __basename(argv[0]));
            return UCS_ERR_CANCELED;
        default:
            /* Not a framework option - try the per-test options */
            status = parse_test_params(&ctx->params, c, optarg);
            if (status != UCS_OK) {
                usage(ctx, __basename(argv[0]));
                return status;
            }
            break;
        }
    }

    if (optind < argc) {
        ctx->server_addr = argv[optind];
    }

    return UCS_OK;
}

/* Socket-based RTE: a fixed two-member group (server + client). */
static unsigned sock_rte_group_size(void *rte_group)
{
    return 2;
}

/* Server is index 0, client is index 1. */
static unsigned sock_rte_group_index(void *rte_group)
{
    sock_rte_group_t *group = rte_group;
    return group->is_server ? 0 : 1;
}

/* Two-sided barrier: exchange a magic word over the connection, driving the
 * caller's progress callback while waiting. Only the OpenMP master thread
 * performs the exchange; all threads then synchronize on the omp barrier. */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp master
  {
    sock_rte_group_t *group = rte_group;
    const unsigned magic = 0xdeadbeef;
    unsigned sync;

    sync = magic;
    safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);

    sync = 0;
    safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);

    ucs_assert(sync == magic);
  }
#pragma omp barrier
}

/* Send an iovec to the peer: total size first, then each fragment. */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t size;
    int i;

    size = 0;
    for (i = 0; i < iovcnt; ++i) {
        size += iovec[i].iov_len;
    }

    safe_send(group->connfd, &size, sizeof(size), NULL, NULL);
    for (i = 0; i < iovcnt; ++i) {
        safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL,
                  NULL);
    }
}

/* Receive a posted vector from peer 'src' (no-op when src is ourselves). */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    int group_index;
    size_t size;

    group_index = sock_rte_group_index(rte_group);
    if (src == group_index) {
        return;
    }

    ucs_assert_always(src == (1 - group_index));
    safe_recv(group->connfd, &size, sizeof(size), NULL, NULL);
    ucs_assert_always(size <= max);
    safe_recv(group->connfd, buffer, size, NULL, NULL);
}

/* Forward a measurement report to print_progress using the context flags. */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}

/* Runtime-environment vtable for the TCP-socket transport. */
static ucx_perf_rte_t sock_rte = {
    .group_size   = sock_rte_group_size,
    .group_index  = sock_rte_group_index,
    .barrier      = sock_rte_barrier,
    .post_vec     = sock_rte_post_vec,
    .recv         = sock_rte_recv,
    .exchange_vec = (void*)ucs_empty_function,
    .report       = sock_rte_report,
};

/* Establish the out-of-band TCP connection between the two test processes.
 * With no server address we listen/accept (server side) and receive the test
 * parameters from the client; otherwise we connect and send our parameters.
 * The server prints the test header, the client prints results. */
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
    struct sockaddr_in inaddr;
    struct hostent *he;
    ucs_status_t status;
    int optval = 1;
    int sockfd, connfd;
    int ret;

    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
        ucs_error("socket() failed: %m");
        status = UCS_ERR_IO_ERROR;
        goto err;
    }

    if (ctx->server_addr == NULL) {
        optval = 1;
        ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval,
                         sizeof(optval));
        if (ret < 0) {
            ucs_error("setsockopt(SO_REUSEADDR) failed: %m");
            status = UCS_ERR_INVALID_PARAM;
            goto err_close_sockfd;
        }

        inaddr.sin_family = AF_INET;
        inaddr.sin_port = htons(ctx->port);
        inaddr.sin_addr.s_addr = INADDR_ANY;
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
        ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("bind() failed: %m");
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }

        ret = listen(sockfd, 10);
        if (ret < 0) {
            ucs_error("listen() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }

        printf("Waiting for connection...\n");

        /* Accept next connection */
        connfd = accept(sockfd, NULL, NULL);
        if (connfd < 0) {
            ucs_error("accept() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }

        /* Listening socket no longer needed once a client is accepted */
        close(sockfd);
        safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.msg_size_cnt) {
            /* Received params carry a dangling list pointer - allocate a
             * local buffer and receive the actual size list into it */
            ctx->params.msg_size_list = malloc(sizeof(*ctx->params.msg_size_list) *
                                               ctx->params.msg_size_cnt);
            if (NULL == ctx->params.msg_size_list) {
                status = UCS_ERR_NO_MEMORY;
                goto err_close_connfd;
            }

            safe_recv(connfd, ctx->params.msg_size_list,
                      sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
                      NULL, NULL);
        }

        ctx->sock_rte_group.connfd = connfd;
        ctx->sock_rte_group.is_server = 1;
    } else {
        /* Client side: resolve the server and connect */
        he = gethostbyname(ctx->server_addr);
        if (he == NULL || he->h_addr_list == NULL) {
            ucs_error("host %s not found: %s", ctx->server_addr,
                      hstrerror(h_errno));
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }

        inaddr.sin_family = he->h_addrtype;
        inaddr.sin_port = htons(ctx->port);
        ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
        memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));

        ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("connect() failed: %m");
            status = UCS_ERR_UNREACHABLE;
            goto err_close_sockfd;
        }

        /* Push our parsed test parameters (and size list) to the server */
        safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.msg_size_cnt) {
            safe_send(sockfd, ctx->params.msg_size_list,
                      sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
                      NULL, NULL);
        }

        ctx->sock_rte_group.connfd = sockfd;
        ctx->sock_rte_group.is_server = 0;
    }

    /* Server prints the test description; client prints the measurements */
    if (ctx->sock_rte_group.is_server) {
        ctx->flags |= TEST_FLAG_PRINT_TEST;
    } else {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group = &ctx->sock_rte_group;
    ctx->params.rte = &sock_rte;
    ctx->params.report_arg = ctx;
    return UCS_OK;

err_close_connfd:
    close(connfd);
    goto err;
err_close_sockfd:
    close(sockfd);
err:
    return status;
}

/* Close the out-of-band socket connection. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    close(ctx->sock_rte_group.connfd);
    return UCS_OK;
}

#if HAVE_MPI
/* MPI-based RTE: group size/rank come straight from MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    return size;
}

static unsigned mpi_rte_group_index(void *rte_group)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    return rank;
}

/* Barrier over MPI point-to-point messages that keeps calling the user's
 * progress callback while waiting (see comment below). */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
    int group_size, my_rank, i;
    MPI_Request *reqs;
    int nreqs = 0;
    int dummy;
    int flag;

#pragma omp master
    /*
     * Naive non-blocking
     * barrier implementation over send/recv, to call user
     * progress while waiting for completion.
     * Not using MPI_Ibarrier to be compatible with MPI-1.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    /* allocate maximal possible number of requests */
    reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

    if (my_rank == 0) {
        /* root gathers "ping" from all other ranks */
        for (i = 1; i < group_size; ++i) {
            MPI_Irecv(&dummy, 0, MPI_INT,
                      i /* source */, 1 /* tag */,
                      MPI_COMM_WORLD, &reqs[nreqs++]);
        }
    } else {
        /* every non-root rank sends "ping" and waits for "pong" */
        MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */,
                 MPI_COMM_WORLD);
        MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */,
                  MPI_COMM_WORLD, &reqs[nreqs++]);
    }

    /* Waiting for receive requests */
    do {
        MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
        progress(arg);
    } while (!flag);

    if (my_rank == 0) {
        /* root sends "pong" to all ranks */
        for (i = 1; i < group_size; ++i) {
            MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */,
                     MPI_COMM_WORLD);
        }
    }
#pragma omp barrier
}

/* Broadcast an iovec to every other rank; the final fragment is sent with
 * tag 1 so the receiver knows when the vector is complete. */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int group_size;
    int my_rank;
    int dest, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    for (dest = 0; dest < group_size; ++dest) {
        if (dest == my_rank) {
            continue;
        }

        for (i = 0; i < iovcnt; ++i) {
            MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
                     i == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    *req = (void*)(uintptr_t)1;
}

/* Receive fragments from rank 'src' into 'buffer' until the tag-1 (last)
 * fragment arrives; no-op when src is ourselves. */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    MPI_Status status;
    size_t offset;
    int my_rank;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (src == my_rank) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max);
        MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG,
                 MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE,
                      &count);
        offset += count;
    } while (status.MPI_TAG != 1); /* tag 1 marks the last fragment */
}

/* Forward a measurement report to print_progress using the context flags. */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}

/* Runtime-environment vtable for the MPI transport. */
static ucx_perf_rte_t mpi_rte = {
    .group_size   = mpi_rte_group_size,
    .group_index  = mpi_rte_group_index,
    .barrier      = mpi_rte_barrier,
    .post_vec     = mpi_rte_post_vec,
    .recv         = mpi_rte_recv,
    .exchange_vec = (void*)ucs_empty_function,
    .report       = mpi_rte_report,
};

#elif HAVE_RTE
/* libRTE-based RTE: delegate group queries to the rte library. */
static unsigned ext_rte_group_size(void *rte_group)
{
    rte_group_t group = (rte_group_t)rte_group;
    return rte_group_size(group);
}

static unsigned ext_rte_group_index(void *rte_group)
{
    rte_group_t group = (rte_group_t)rte_group;
    return rte_group_rank(group);
}

/* Barrier via rte_barrier(); only the OpenMP master thread enters it,
 * then all threads meet at the omp barrier. */
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp master
  {
    rte_group_t group = (rte_group_t)rte_group;
    int rc;

    rc = rte_barrier(group);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_barrier");
    }
  }
#pragma omp barrier
}

/* Publish an iovec through an rte SRS session under "KEY_PERF"; the session
 * handle is returned in *req for the subsequent exchange/recv calls. */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;

    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_create");
    }

    /* Translate struct iovec entries into rte_iovec_t byte vectors */
    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        return;
    }
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type = rte_datatype_uint8_t;
        r_vec[i].count = iovec[i].iov_len;
    }

    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }
    *req = session;
    free(r_vec);
}

/* Fetch the peer's "KEY_PERF" data from the SRS session in 'req', unpack it
 * into 'buffer', and destroy the session. */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void
         *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;

    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return;
    }

    /* Unpack the received byte stream into the caller's buffer */
    r_vec.iov_base = buffer;
    r_vec.type = rte_datatype_uint8_t;
    r_vec.count = max;

    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);

    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    free(rte_buffer);
}

/* Perform the collective SRS data exchange for the session in 'req'. */
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
    rte_srs_session_t session = (rte_srs_session_t)req;
    int rc;

    rc = rte_srs_exchange_data(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_exchange_data");
    }
}

/* Forward a measurement report to print_progress using the context flags. */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}

/* Runtime-environment vtable for the libRTE transport. */
static ucx_perf_rte_t ext_rte = {
    .group_size   = ext_rte_group_size,
    .group_index  = ext_rte_group_index,
    .barrier      = ext_rte_barrier,
    .report       = ext_rte_report,
    .post_vec     = ext_rte_post_vec,
    .recv         = ext_rte_recv,
    .exchange_vec = ext_rte_exchange_vec,
};
#endif

/* Install the MPI (or libRTE) runtime environment into ctx->params.
 * Under MPI the test requires exactly 2 ranks; the non-root rank prints
 * the results. No-op when built without either library. */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
    ucs_trace_func("");

#if HAVE_MPI
    int size, rank;

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2) {
        ucs_error("This test should run with exactly 2 processes (actual: %d)",
                  size);
        return UCS_ERR_INVALID_PARAM;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group = NULL;
    ctx->params.rte = &mpi_rte;
    ctx->params.report_arg = ctx;
#elif HAVE_RTE
    rte_group_t group;

    rte_init(NULL, NULL, &group);
    if (1 == rte_group_rank(group)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group = group;
    ctx->params.rte = &ext_rte;
    ctx->params.report_arg = ctx;
#endif
    return UCS_OK;
}

/* Tear down the MPI/libRTE runtime environment (MPI_Finalize is in main). */
static ucs_status_t cleanup_mpi_rte(struct
perftest_context *ctx) { #if HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { cpu_set_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { if (ctx->cpu >= nr_cpus) { ucs_error("cpu (%u) ot of range (0..%u)", ctx->cpu, nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } CPU_SET(ctx->cpu, &cpuset); ret = sched_setaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = sched_getaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." 
" Performance may be impacted.", count); } } return UCS_OK; } static void clone_params(ucx_perf_params_t *dest, const ucx_perf_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->msg_size_cnt * sizeof(*dest->msg_size_list); dest->msg_size_list = malloc(msg_size_list_size); memcpy(dest->msg_size_list, src->msg_size_list, msg_size_list_size); } static ucs_status_t run_test_recurs(struct perftest_context *ctx, ucx_perf_params_t *parent_params, unsigned depth) { ucx_perf_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->api == UCX_PERF_API_UCP) { if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->uct.dev_name); } if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(parent_params, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } clone_params(&params, parent_params); line_num = 0; while ((status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth])) == UCS_OK) { status = run_test_recurs(ctx, &params, depth + 1); free(params.msg_size_list); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; clone_params(&params, parent_params); } free(params.msg_size_list); fclose(batch_file); return UCS_OK; } static ucs_status_t run_test(struct perftest_context *ctx) { ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { 
        ucs_error("Failed to run test: %s", ucs_status_string(status));
    }

    return status;
}

/* Program entry point: optionally initialize MPI, parse options, pin the
 * CPU, set up the runtime environment (MPI/libRTE or plain TCP sockets),
 * run the test, and clean everything up. */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int mpi_rte;
    int ret;

#if HAVE_MPI
    /* Only auto-initialize MPI when stdin is not a terminal (i.e. when
     * launched under a job launcher such as mpirun) */
    mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
    mpi_initialized = 0;
#endif

    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        goto out;
    }

#ifdef __COVERITY__
    /* coverity[dont_call] */
    mpi_rte = rand(); /* Shut up deadcode error */
#endif

    /* Decide which out-of-band channel to use */
    if (ctx.mpi) {
        mpi_rte = 1;
    } else {
#if HAVE_RTE
        mpi_rte = 1;
#else
        mpi_rte = 0;
#endif
    }

    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Create RTE */
    status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }

    ret = 0;

out_cleanup_rte:
    (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
    if (ctx.params.msg_size_list) {
        free(ctx.params.msg_size_list);
    }
    if (mpi_initialized) {
#if HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
irbuilder_unroll_partial_factor_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1 // CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* 
%[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 13 // CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0 // CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1) // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]] // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: 
[[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]] // CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 13 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 13, %[[TMP13]] // CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4 // 
CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP20]], %[[TMP23]] // CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP25]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP26]] // CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP28]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void 
@__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT:    br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT:  [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT:    br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT:  [[OMP_LOOP_AFTER]]:
// CHECK-NEXT:    ret void
// CHECK-NEXT:  }
// Test body: each iteration computes a[i] = b[i] * c[i] * d[i].  The
// '#pragma omp unroll partial(13)' requests a 13-wide partial unroll of the
// worksharing loop; the autogenerated CHECK lines above pin down the
// resulting floor/tile loop nest emitted by the OpenMP IR builder.
// (This file's assertions are autogenerated by update_cc_test_checks.py —
// regenerate rather than hand-editing the CHECK lines.)
void unroll_partial_heuristic_for(int n, float *a, float *b, float *c, float *d) {
#pragma omp for
#pragma omp unroll partial(13)
  for (int i = 0; i < n; i++) {
    a[i] = b[i] * c[i] * d[i];
  }
}

#endif // HEADER

// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT:  [[ENTRY:.*]]:
// CHECK-NEXT:    %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT:    %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT:    %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT:    %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT:    %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT:    store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT:    store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT:    %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT:    %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT:    store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT:    %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1
// CHECK-NEXT:    %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8
// CHECK-NEXT:    %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4
// CHECK-NEXT:    store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4
// CHECK-NEXT:    store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT:    %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT:    %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT:    %[[CMP:.+]] = icmp slt i32 %[[TMP7]],
%[[TMP8]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]] // CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP11]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP12:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP12]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], 
align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 13}
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H

#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"

namespace clang {

//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//

/// \brief This is a basic class for representing single OpenMP clause.
///
class OMPClause {
  /// \brief Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the clause.
  SourceLocation EndLoc;
  /// \brief Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// \brief Returns true for implicit clauses, i.e. clauses with no source
  /// location (an invalid StartLoc marks a clause the compiler synthesized).
  bool isImplicit() const { return StartLoc.isInvalid(); }

  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;
  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  const_child_range children() const {
    // Delegate to the non-const overload; iteration itself does not mutate.
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Pre-initialization statement for the clause.
  Stmt *PreInit;

protected:
  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S) { PreInit = S; }
  OMPClauseWithPreInit(const OMPClause *This) : PreInit(nullptr) {
    // 'get' must recognize every clause kind that mixes this class in.
    assert(get(This) && "get is not tuned for pre-init.");
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }
  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Post-update expression for the clause.
  Expr *PostUpdate;

protected:
  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
  OMPClauseWithPostUpdate(const OMPClause *This)
      : OMPClauseWithPreInit(This), PostUpdate(nullptr) {
    // 'get' must recognize every clause kind that mixes this class in.
    assert(get(This) && "get is not tuned for post-update.");
  }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }
  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of variables in the list.
  unsigned NumVars;

protected:
  /// \brief Fetches list of variables associated with this clause.
  /// The variable expressions live in the derived class's trailing storage.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }

  /// \brief Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

  /// \brief Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

public:
  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
///
class OMPIfClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'if' clause.
  Stmt *Condition;
  /// \brief Location of ':' (if any).
  SourceLocation ColonLoc;
  /// \brief Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier;
  /// \brief Name modifier location.
  SourceLocation NameModifierLoc;

  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }
  /// \brief Set directive name modifier for the clause.
  ///
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
  /// \brief Set location of directive name modifier for the clause.
  ///
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
  /// \brief Set location of ':'.
  ///
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond,
              SourceLocation StartLoc, SourceLocation LParenLoc,
              SourceLocation NameModifierLoc, SourceLocation ColonLoc,
              SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier),
        NameModifierLoc(NameModifierLoc) {}

  /// \brief Build an empty clause.
  ///
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(),
        Condition(nullptr), ColonLoc(), NameModifier(OMPD_unknown),
        NameModifierLoc() {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
  /// \brief Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
  /// \brief Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }

  child_range children() { return child_range(&Condition, &Condition + 1); }
};

/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
///
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'final' clause.
  Stmt *Condition;

  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// \brief Build 'final' clause with condition \a Cond.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Cond Condition of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}

  /// \brief Build an empty clause.
  ///
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Condition(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }

  child_range children() { return child_range(&Condition, &Condition + 1); }
};

/// \brief This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
///
class OMPNumThreadsClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'num_threads' clause.
  Stmt *NumThreads;

  /// \brief Set condition.
  ///
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// \brief Build 'num_threads' clause with condition \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc,
                      SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {}

  /// \brief Build an empty clause.
  ///
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumThreads(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
};

/// \brief This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
///
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Safe iteration space distance.
  Stmt *Safelen;

  /// \brief Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// \brief Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Safelen(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }
};

/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
///
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Preferred number of iterations to execute concurrently.
  Stmt *Simdlen;

  /// \brief Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// \brief Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Simdlen(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the preferred iteration concurrency expression.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
};

/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
///
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of for-loops.
  Stmt *NumForLoops;

  /// \brief Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// \brief Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumForLoops(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }

  child_range children() {
    return child_range(&NumForLoops, &NumForLoops + 1);
  }
};

/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
///
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  ///
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
  /// \brief Set argument location.
  ///
  /// \param KLoc Argument location.
  ///
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// \brief Build an empty clause.
  ///
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown),
        KindKwLoc(SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
  /// \brief Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }

  // No expression children: the clause carries only a kind and locations.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
///
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clause.
  ///
  /// \param K Kind of clause.
  ///
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }
  /// \brief Set clause kind location.
  ///
  /// \param KLoc Kind location.
  ///
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// \brief Build an empty clause.
/// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind; /// \brief Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// \brief Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Set the first schedule modifier. /// /// \param M Schedule modifier. 
/// void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// \brief Set the second schedule modifier. /// /// \param M Schedule modifier. /// void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// \brief Set location of the first schedule modifier. /// void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// \brief Set location of the second schedule modifier. /// void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// \brief Set schedule modifier location. /// /// \param M Schedule modifier location. /// void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. 
/// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get the first modifier of the clause. /// OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// \brief Get the second modifier of the clause. /// OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get the first modifier location. /// SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// \brief Get the second modifier location. /// SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// \brief Get location of ','. 
/// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. /// const Expr *getChunkSize() const { return ChunkSize; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. /// class OMPOrderedClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// \brief Build an empty clause. /// /// \param N Number of variables. 
/// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. 
/// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. 
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  //
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  // Tail-allocated array #1 (see class comment above).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  // Tail-allocated array #2 (see class comment above).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  // Tail-allocated array #3 (see class comment above).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  // Tail-allocated array #4 (see class comment above).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. /// class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. 
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;
  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc,
                     unsigned N, NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this), ColonLoc(), QualifierLoc(),
        NameInfo() {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// \brief Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier;
  /// \brief Location of linear modifier if any.
  SourceLocation ModifierLoc;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the linear step for clause.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// \brief Sets the expression to calculate linear step for clause.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// \brief Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this), Modifier(OMPC_LINEAR_val),
        ModifierLoc(), ColonLoc() {}

  /// \brief Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed
  /// by NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  ///
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// \brief Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// \brief Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// \brief Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// \brief Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier,
         SourceLocation ModifierLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL,
         ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit,
         Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
  /// \brief Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// \brief Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// \brief Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }
  /// \brief Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }
  /// \brief Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }
  /// \brief Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// \brief Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// \brief Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  typedef MutableArrayRef<Expr *>::iterator privates_iterator;
  typedef ArrayRef<const Expr *>::iterator privates_const_iterator;
  typedef llvm::iterator_range<privates_iterator> privates_range;
  typedef llvm::iterator_range<privates_const_iterator> privates_const_range;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm::iterator_range<inits_iterator> inits_range;
  typedef llvm::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  typedef MutableArrayRef<Expr *>::iterator updates_iterator;
  typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
  typedef llvm::iterator_range<updates_iterator> updates_range;
  typedef llvm::iterator_range<updates_const_iterator> updates_const_range;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator finals_iterator;
  typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
  typedef llvm::iterator_range<finals_iterator> finals_range;
  typedef llvm::iterator_range<finals_const_iterator> finals_const_range;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};

/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars),
        ColonLoc(SourceLocation()) {}

public:
  /// \brief Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }
  /// \brief Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};

/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment
  // operation required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment
  // operation required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  ///
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs,
         ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return
        T->getClauseKind() == OMPC_copyin;
  }
};

/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  ///
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs,
         ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// \brief This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
///
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind;
  /// \brief Dependency type location.
  SourceLocation DepLoc;
  /// \brief Colon location.
  SourceLocation ColonLoc;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        DepKind(OMPC_DEPEND_unknown) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPDependClause(unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        DepKind(OMPC_DEPEND_unknown) {}

  /// \brief Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// \brief Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// \brief Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  static OMPDependClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc,
         OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
         SourceLocation ColonLoc, ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
  /// \brief Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }
  /// \brief Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Set the loop counter value for the depend clauses with 'sink|source' kind
  /// of dependency. Required for codegen.
  void setCounterValue(Expr *V);
  /// Get the loop counter value.
  Expr *getCounterValue();
  /// Get the loop counter value.
  const Expr *getCounterValue() const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};

/// \brief This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
/// class OMPDeviceClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Device number. Stmt *Device; /// \brief Set the device number. /// /// \param E Device number. /// void setDevice(Expr *E) { Device = E; } public: /// \brief Build 'device' clause. /// /// \param E Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDeviceClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_device, StartLoc, EndLoc), LParenLoc(LParenLoc), Device(E) {} /// \brief Build an empty clause. /// OMPDeviceClause() : OMPClause(OMPC_device, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Device(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return device number. Expr *getDevice() { return cast<Expr>(Device); } /// \brief Return device number. Expr *getDevice() const { return cast<Expr>(Device); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_device; } child_range children() { return child_range(&Device, &Device + 1); } }; /// \brief This represents 'threads' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered threads /// \endcode /// In this example directive '#pragma omp ordered' has simple 'threads' clause. /// class OMPThreadsClause : public OMPClause { public: /// \brief Build 'threads' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_threads, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPThreadsClause() : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_threads; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'simd' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. /// class OMPSIMDClause : public OMPClause { public: /// \brief Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_simd, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_simd; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: // \brief Class that represents a component of a mappable expression. E.g. // for an expression S.a, the first component is a declaration reference // expression associated with 'S' and the second is a member expression // associated with the field declaration 'a'. If the expression is an array // subscript it may not have any associated declaration. In that case the // associated declaration is set to nullptr. class MappableComponent { // \brief Expression associated with the component. Expr *AssociatedExpression = nullptr; // \brief Declaration associated with the declaration. If the component does // not have a declaration (e.g. array subscripts or section), this is set to // nullptr. 
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() {}
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          // Canonicalize the declaration so that components referring to the
          // same entity compare equal regardless of redeclarations.
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // \brief List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  typedef SmallVector<MappableComponent, 8> MappableExprComponentList;
  typedef ArrayRef<MappableComponent> MappableExprComponentListRef;

  // \brief List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  typedef SmallVector<MappableExprComponentList, 8> MappableExprComponentLists;
  typedef ArrayRef<MappableExprComponentList> MappableExprComponentListsRef;

protected:
  // \brief Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // \brief Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<ValueDecl *> Declarations);
};

/// \brief This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// \brief Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// \brief Number of component lists in this clause.
  unsigned NumComponentLists;

  /// \brief Total number of components in this clause.
  unsigned NumComponents;

protected:
  /// \brief Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// \brief Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// \brief Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::DenseMap<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// \brief Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause - one
  /// list for each expression in the clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc,
                            unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
        NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}

public:
  /// \brief Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
  /// \brief Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }
  /// \brief Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// \brief Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// \brief Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          RemainingLists(0u), PrevListSize(0u),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// \brief Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  typedef llvm::iterator_range<const_component_lists_iterator>
      const_component_lists_range;

  /// \brief Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// \brief Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  typedef ArrayRef<ValueDecl *>::iterator const_all_decls_iterator;
  typedef llvm::iterator_range<const_all_decls_iterator> const_all_decls_range;
  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  typedef ArrayRef<unsigned>::iterator const_all_num_lists_iterator;
  typedef llvm::iterator_range<const_all_num_lists_iterator>
      const_all_num_lists_range;
  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  typedef ArrayRef<unsigned>::iterator const_all_lists_sizes_iterator;
  typedef llvm::iterator_range<const_all_lists_sizes_iterator>
      const_all_lists_sizes_range;
  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  typedef ArrayRef<MappableComponent>::iterator const_all_components_iterator;
  typedef llvm::iterator_range<const_all_components_iterator>
      const_all_components_range;
  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
};

/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
///
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One 'unsigned' array holds both the per-declaration list counts and the
    // cumulative component list sizes (see OMPMappableExprListClause).
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Map type modifier for the 'map' clause.
  OpenMPMapClauseKind MapTypeModifier;

  /// \brief Map type for the 'map' clause.
  OpenMPMapClauseKind MapType;

  /// \brief Is this an implicit map type or not.
  bool MapTypeIsImplicit;

  /// \brief Location of the map type.
  SourceLocation MapLoc;

  /// \brief Colon location.
  SourceLocation ColonLoc;

  /// \brief Set type modifier for the clause.
  ///
  /// \param T Type Modifier for the clause.
  ///
  void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }

  /// \brief Set type for the clause.
  ///
  /// \param T Type for the clause.
  ///
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// \brief Set type location.
  ///
  /// \param TLoc Type location.
  ///
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// \brief Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// \brief Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapTypeModifier Map type modifier.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, SourceLocation StartLoc,
                        SourceLocation LParenLoc, SourceLocation EndLoc,
                        unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents),
        MapTypeModifier(MapTypeModifier), MapType(MapType),
        MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents),
        MapTypeModifier(OMPC_MAP_unknown), MapType(OMPC_MAP_unknown),
        MapTypeIsImplicit(false), MapLoc() {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param TypeModifier Map type modifier.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  ///
  static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
                              SourceLocation LParenLoc, SourceLocation EndLoc,
                              ArrayRef<Expr *> Vars,
                              ArrayRef<ValueDecl *> Declarations,
                              MappableExprComponentListsRef ComponentLists,
                              OpenMPMapClauseKind TypeModifier,
                              OpenMPMapClauseKind Type, bool TypeIsImplicit,
                              SourceLocation TypeLoc);

  /// \brief Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                   unsigned NumUniqueDeclarations,
                                   unsigned NumComponentLists,
                                   unsigned NumComponents);

  /// \brief Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// \brief Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// \brief Fetches the map type modifier for the clause.
  OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
    return MapTypeModifier;
  }

  /// \brief Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// \brief Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
};

/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
///
class OMPNumTeamsClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief NumTeams number.
  Stmt *NumTeams;

  /// \brief Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  ///
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// \brief Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumTeamsClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTeams(E) {}

  /// \brief Build an empty clause.
  ///
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumTeams(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// \brief Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_teams; } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } }; /// \brief This represents 'thread_limit' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams thread_limit(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'thread_limit' /// with single expression 'n'. /// class OMPThreadLimitClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief ThreadLimit number. Stmt *ThreadLimit; /// \brief Set the ThreadLimit number. /// /// \param E ThreadLimit number. /// void setThreadLimit(Expr *E) { ThreadLimit = E; } public: /// \brief Build 'thread_limit' clause. /// /// \param E Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPThreadLimitClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_thread_limit, StartLoc, EndLoc), LParenLoc(LParenLoc), ThreadLimit(E) {} /// \brief Build an empty clause. /// OMPThreadLimitClause() : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), ThreadLimit(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// \brief Return ThreadLimit number. 
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_thread_limit; } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } }; /// \brief This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. /// class OMPPriorityClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Priority number. Stmt *Priority; /// \brief Set the Priority number. /// /// \param E Priority number. /// void setPriority(Expr *E) { Priority = E; } public: /// \brief Build 'priority' clause. /// /// \param E Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc), Priority(E) {} /// \brief Build an empty clause. /// OMPPriorityClause() : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Priority(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return Priority number. Expr *getPriority() { return cast<Expr>(Priority); } /// \brief Return Priority number. Expr *getPriority() const { return cast<Expr>(Priority); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_priority; } child_range children() { return child_range(&Priority, &Priority + 1); } }; /// \brief This represents 'grainsize' clause in the '#pragma omp ...' 
/// directive. /// /// \code /// #pragma omp taskloop grainsize(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'grainsize' /// with single expression '4'. /// class OMPGrainsizeClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Grainsize; /// \brief Set safelen. void setGrainsize(Expr *Size) { Grainsize = Size; } public: /// \brief Build 'grainsize' clause. /// /// \param Size Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc), Grainsize(Size) {} /// \brief Build an empty clause. /// explicit OMPGrainsizeClause() : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Grainsize(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_grainsize; } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } }; /// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. /// class OMPNogroupClause : public OMPClause { public: /// \brief Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
  ///
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
///
class OMPNumTasksClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of tasks expression of the 'num_tasks' clause.
  Stmt *NumTasks;
  /// \brief Set number of tasks.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// \brief Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumTasks(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return number of tasks.
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
};

/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
///
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Hint expression of the 'hint' clause.
  Stmt *Hint;
  /// \brief Set hint expression.
  ///
  void setHint(Expr *H) { Hint = H; }

public:
  /// \brief Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}
  /// \brief Build an empty clause.
  ///
  OMPHintClause()
      : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Hint(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns the hint expression of the clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
  child_range children() { return child_range(&Hint, &Hint + 1); }
};

/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. /// class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind; /// \brief Start location of the schedule kind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. 
/// OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize) : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); } /// \brief Build an empty clause. /// explicit OMPDistScheduleClause() : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this), Kind(OMPC_DIST_SCHEDULE_unknown), ChunkSize(nullptr) {} /// \brief Get kind of the clause. /// OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getDistScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. /// const Expr *getChunkSize() const { return ChunkSize; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_dist_schedule; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } }; /// \brief This represents 'defaultmap' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp target defaultmap(tofrom: scalar) /// \endcode /// In this example directive '#pragma omp target' has 'defaultmap' clause of kind /// 'scalar' with modifier 'tofrom'. /// class OMPDefaultmapClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Modifiers for 'defaultmap' clause. OpenMPDefaultmapClauseModifier Modifier; /// \brief Locations of modifiers. 
  SourceLocation ModifierLoc;
  /// \brief A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind;
  /// \brief Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// \brief Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  ///
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
  /// \brief Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  ///
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }
  /// \brief Set location of the defaultmap modifier.
  ///
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  ///
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  ///
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// \brief Build 'defaultmap' clause with defaultmap kind \a Kind.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  ///
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()),
        Modifier(OMPC_DEFAULTMAP_MODIFIER_unknown),
        Kind(OMPC_DEFAULTMAP_unknown) {}

  /// \brief Get kind of the clause.
  ///
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
  /// \brief Get the modifier of the clause.
  ///
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }
  /// \brief Get location of '('.
  ///
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// \brief Get kind location.
  ///
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }
  /// \brief Get the modifier location.
  ///
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
///
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
/// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. /// explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} /// \brief Build an empty clause. /// /// \param NumVars Number of expressions listed in this clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. /// explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause( OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(), NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} public: /// \brief Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. 
/// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. /// static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents); static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_to; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } }; /// \brief This represents clause 'from' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update from(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'from' /// with the variables 'a' and 'b'. /// class OMPFromClause final : public OMPMappableExprListClause<OMPFromClause>, private llvm::TrailingObjects< OMPFromClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend TrailingObjects; friend OMPVarListClause; friend OMPMappableExprListClause; friend class OMPClauseReader; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } /// \brief Build clause with number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of expressions listed in this clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of component lists in this clause. 
/// \param NumComponents Total number of expression components in the clause. /// explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} /// \brief Build an empty clause. /// /// \param NumVars Number of expressions listed in this clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. /// explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause( OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(), NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} public: /// \brief Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. 
/// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. /// static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents); static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_from; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } }; /// This represents clause 'use_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_ptr' with the variables 'a' and 'b'. /// class OMPUseDevicePtrClause final : public OMPVarListClause<OMPUseDevicePtrClause>, private llvm::TrailingObjects<OMPUseDevicePtrClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPUseDevicePtrClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPUseDevicePtrClause>(OMPC_use_device_ptr, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPUseDevicePtrClause(unsigned N) : OMPVarListClause<OMPUseDevicePtrClause>( OMPC_use_device_ptr, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param VL List of references to the variables. /// static OMPUseDevicePtrClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_use_device_ptr; } }; /// This represents clause 'is_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target is_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause /// 'is_device_ptr' with the variables 'a' and 'b'. /// class OMPIsDevicePtrClause final : public OMPVarListClause<OMPIsDevicePtrClause>, private llvm::TrailingObjects<OMPIsDevicePtrClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPIsDevicePtrClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPIsDevicePtrClause>(OMPC_is_device_ptr, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. /// explicit OMPIsDevicePtrClause(unsigned N) : OMPVarListClause<OMPIsDevicePtrClause>( OMPC_is_device_ptr, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPIsDevicePtrClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_is_device_ptr; } }; } // end namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
GB_unop__identity_uint64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_fc64)
// op(A') function:  GB (_unop_tran__identity_uint64_fc64)

// C type:   uint64_t
// A type:   GxB_FC64_t
// cast:     uint64_t cij = GB_cast_to_uint64_t (creal (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is a copy of x)
#define GB_OP(z, x) \
    z = x ;

// casting (complex-to-unsigned: real part only, then saturating cast)
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_fc64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose template; uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dmml.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 *                      Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 *             University of Science and Technology (KAUST)
 *
 * @file src/backends/openmp/blrm/dmml.c
 * @version 0.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"

int starsh_blrm__dmml_omp(STARSH_blrm *matrix, int nrhs, double alpha,
        double *A, int lda, double beta, double *B, int ldb)
//! Multiply blr-matrix by dense matrix.
/*! Performs `C=alpha*A*B+beta*C` with @ref STARSH_blrm `A` and dense matrices
 * `B` and `C`. All the integer types are int, since they are used in BLAS
 * calls. Note the naming mismatch with the parameters below: the BLRM operand
 * is `matrix`, the dense right-hand side is the parameter `A`, and the result
 * is accumulated into the parameter `B`.
 *
 * @param[in] matrix: Pointer to @ref STARSH_blrm object.
 * @param[in] nrhs: Number of right hand sides.
 * @param[in] alpha: Scalar multiplier.
 * @param[in] A: Dense matrix, right hand side.
 * @param[in] lda: Leading dimension of `A`.
 * @param[in] beta: Scalar multiplier.
 * @param[in,out] B: Resulting dense matrix.
 * @param[in] ldb: Leading dimension of B.
 * @return Error code @ref STARSH_ERRNO.
 * @ingroup blrm
 * */
{
    STARSH_blrm *M = matrix;
    STARSH_blrf *F = M->format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    // Problem dimensions (rows of the operator, columns = rows of A/B)
    STARSH_int nrows = P->shape[0];
    STARSH_int ncols = P->shape[P->ndim-1];
    // Shorcuts to information about clusters
    STARSH_cluster *R = F->row_cluster;
    STARSH_cluster *C = F->col_cluster;
    void *RD = R->data, *CD = C->data;
    // Number of far-field and near-field blocks
    STARSH_int nblocks_far = F->nblocks_far;
    STARSH_int nblocks_near = F->nblocks_near, bi;
    char symm = F->symm;
    // NOTE(review): hard-coded rank cap used only to size temp_D below;
    // presumably every M->far_rank[bi] <= 100 — TODO confirm upstream.
    int maxrank = 100;
    // Nominal block size (rows per block row); assumes uniform block rows.
    int maxnb = nrows/F->nbrows;
    // Setting B = beta*B
    if(beta == 0.)
        #pragma omp parallel for schedule(static)
        for(int i = 0; i < nrows; i++)
            for(int j = 0; j < nrhs; j++)
                B[j*ldb+i] = 0.;
    else
        #pragma omp parallel for schedule(static)
        for(int i = 0; i < nrows; i++)
            for(int j = 0; j < nrhs; j++)
                B[j*ldb+i] *= beta;
    double *temp_D, *temp_B;
    int num_threads;
    // Query the team size once; used to size the per-thread scratch buffers.
    #pragma omp parallel
    #pragma omp master
    num_threads = omp_get_num_threads();
    // Per-thread scratch for intermediate GEMM products.
    // NOTE(review): when M->onfly != 0 the far-field loop below still strides
    // temp_D by nrhs*maxrank per thread, but only maxnb*maxnb per thread is
    // allocated here — verify nrhs*maxrank <= maxnb*maxnb is guaranteed.
    if(M->onfly == 0)
    {
        STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);
    }
    else
    {
        STARSH_MALLOC(temp_D, num_threads*maxnb*maxnb);
    }
    // Per-thread private copy of the output, reduced into B at the end
    // (avoids atomics when two blocks share a block row).
    STARSH_MALLOC(temp_B, num_threads*nrhs*nrows);
    #pragma omp parallel
    {
        double *out = temp_B+omp_get_thread_num()*nrhs*nrows;
        for(int j = 0; j < nrhs*nrows; j++)
            out[j] = 0.;
    }
    int ldout = nrows;
    // Simple cycle over all far-field admissible blocks
    // (bi is the loop variable of the parallel for, hence implicitly private)
    #pragma omp parallel for schedule(dynamic, 1)
    for(bi = 0; bi < nblocks_far; bi++)
    {
        // Get indexes of corresponding block row and block column
        STARSH_int i = F->block_far[2*bi];
        STARSH_int j = F->block_far[2*bi+1];
        // Get sizes and rank (these locals shadow the outer nrows/ncols)
        int nrows = R->size[i];
        int ncols = C->size[j];
        int rank = M->far_rank[bi];
        // Get pointers to data buffers
        double *U = M->far_U[bi]->data, *V = M->far_V[bi]->data;
        int info = 0;   // NOTE(review): unused
        double *D = temp_D+omp_get_thread_num()*nrhs*maxrank;
        double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
        // Multiply low-rank matrix in U*V^T format by a dense matrix:
        // D = V^T * A(block), then out += alpha * U * D
        cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,
                ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,
                rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout);
        if(i != j && symm == 'S')
        {
            // Multiply low-rank matrix in V*U^T format by a dense matrix
            // U and V are simply swapped in case of symmetric block
            cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,
                    nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols,
                    nrhs, rank, alpha, V, ncols, D, rank, 1.0,
                    out+C->start[j], ldout);
        }
    }
    if(M->onfly == 1)
        // Simple cycle over all near-field blocks, dense blocks generated
        // on the fly into the per-thread scratch D
        #pragma omp parallel for schedule(dynamic, 1)
        for(bi = 0; bi < nblocks_near; bi++)
        {
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            int info = 0;   // NOTE(review): unused
            double *D = temp_D+omp_get_thread_num()*maxnb*maxnb;
            double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
            // Fill temporary buffer with elements of corresponding block
            kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],
                    RD, CD, D, nrows);
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,
                    out+R->start[i], ldout);
            if(i != j && symm == 'S')
            {
                // Repeat in case of symmetric matrix
                cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,
                        nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,
                        1.0, out+C->start[j], ldout);
            }
        }
    else
        // Simple cycle over all near-field blocks, dense blocks already
        // stored in M->near_D
        #pragma omp parallel for schedule(dynamic, 1)
        for(bi = 0; bi < nblocks_near; bi++)
        {
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            // Get pointers to data buffers
            double *D = M->near_D[bi]->data;
            double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,
                    out+R->start[i], ldout);
            if(i != j && symm == 'S')
            {
                // Repeat in case of symmetric matrix
                cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,
                        nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,
                        1.0, out+C->start[j], ldout);
            }
        }
    // Reduce the per-thread partial results into B
    #pragma omp parallel for schedule(static)
    for(int i = 0; i < ldout; i++)
        for(int j = 0; j < nrhs; j++)
            for(int k = 0; k < num_threads; k++)
                B[j*ldb+i] += temp_B[(k*nrhs+j)*ldout+i];
    free(temp_B);
    free(temp_D);
    return 0;
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-508,512)),ceild(4*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t3+Nx,512),floord(Nt+Nx-4,512)),floord(8*t1+Nx+13,512)),floord(16*t2+Nx+12,512)),floord(16*t1-16*t2+Nz+Nx+11,512));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),512*t4+510),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef 
LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_subassign_04.c
//------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 04: C(I,J) += A ; using S

// M:           NULL
// Mask_comp:   false
// C_replace:   false
// accum:       present
// A:           matrix
// S:           constructed

#define GB_FREE_WORK GB_FREE_TWO_SLICE

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_04
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,
    const GrB_Matrix A,
    const GrB_Matrix S,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    // These macros (from GB_subassign_methods.h) declare and unpack the
    // contents of C, A, S, and the accum operator into local variables.
    GB_GET_C ;
    GB_GET_A ;
    GB_GET_S ;
    GB_GET_ACCUM ;

    //--------------------------------------------------------------------------
    // Method 04: C(I,J) += A ; using S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal.  Every entry in A must be visited, and the
    // corresponding entry in S must then be found.  Time for this phase is
    // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
    // time.  This method simply traverses all of A+S (like GB_add for
    // computing A+S), the same as Method 02.  Time taken is O(nnz(A)+nnz(S)).
    // The only difference is that the traversal of A+S can terminate if A is
    // exhausted.  Entries in S but not A do not actually require any work
    // (unlike Method 02, which must visit all entries in A+S).

    // Method 02 and Method 04 are somewhat similar.  They differ on how C is
    // modified when the entry is present in S but not A.

    // Compare with Method 16, which computes C(I,J)<!M> += A, using S.

    //--------------------------------------------------------------------------
    // Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    // Slice the merged pattern of A and S into ntasks balanced tasks; also
    // defines nthreads, Zh, Z_to_X, Z_to_S used below.
    GB_SUBASSIGN_TWO_SLICE (A, S) ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        // defines kfirst, klast, task_pending, and per-task state
        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get A(:,j) and S(:,j)
            //------------------------------------------------------------------

            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and A(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            // (jC not needed in phase 1; it is only computed in phase 2)
            // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and A (:,j) have entries
            while (pS < pS_end && pA < pA_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iA = Ai [pA] ;

                if (iS < iA)
                {
                    // ----[C . 1] or [X . 1]-----------------------------------
                    // S (i,j) is present but A (i,j) is not
                    // [C . 1]: action: ( C ): no change, with accum
                    // [X . 1]: action: ( X ): still a zombie
                    GB_NEXT (S) ;
                }
                else if (iA < iS)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, A (i,j) is present
                    // [. A 1]: action: ( insert ) — counted now, done in phase 2
                    task_pending++ ;
                    GB_NEXT (A) ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =C+A ): apply accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_withaccum_C_A_1_matrix ;
                    GB_NEXT (S) ;
                    GB_NEXT (A) ;
                }
            }

            // ignore the remainder of S (:,j)

            // List A (:,j) has entries.  List S (:,j) exhausted.
            // Every remaining entry of A becomes a pending tuple.
            task_pending += (pA_end - pA) ;
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // cumulative sum of per-task pending-tuple counts, so each task knows
    // where to write its tuples
    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get A(:,j) and S(:,j)
            //------------------------------------------------------------------

            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and A(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and A (:,j) have entries
            while (pS < pS_end && pA < pA_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iA = Ai [pA] ;

                if (iS < iA)
                {
                    GB_NEXT (S) ;
                }
                else if (iA < iS)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, A (i,j) is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (Ax +(pA*asize)) ;
                    GB_NEXT (A) ;
                }
                else
                {
                    GB_NEXT (S) ;
                    GB_NEXT (A) ;
                }
            }

            // ignore the remainder of S (:,j)

            // while list A (:,j) has entries.  List S (:,j) exhausted.
            while (pA < pA_end)
            {
                // ----[. A 1]--------------------------------------------------
                // S (i,j) is not present, A (i,j) is present
                // [. A 1]: action: ( insert )
                int64_t iA = Ai [pA] ;
                int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                GB_PENDING_INSERT (Ax +(pA*asize)) ;
                GB_NEXT (A) ;
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
conv_dw_dilation_kernel_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haoluo@openailab.com */ #ifndef __CONV_DW_DILATION_KERNEL_ARM_H_ #define __CONV_DW_DILATION_KERNEL_ARM_H_ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "convolution_param.h" #include "conv_dw_k5_k7_kernel_arm.h" int conv_dw_dilation_run(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w, int channel, int pad, int activation, int num_thread) { int channel_size = input_h * input_w; int mid_w = input_w - pad * 2; int mid_block_end = (mid_w & -4) + pad; int mid_end = mid_w + pad; int w = 0; #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float* input_buf_c = input_buf + c * channel_size; float* output_buf_c = output_buf + c * channel_size; float* weight_buf_c = weight_buf + c * 9; float bias_c = bias ? 
bias[c] : 0; for (int h = 0; h < pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * 
input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = pad; h < input_h - pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * 
input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = input_h - pad; h < input_h; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = 
vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } } return 0; } #endif
CPUImplQPU.h
/* Copyright (c) 2017-2020 Origin Quantum Computing. All Right Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef CPU_QUANTUM_GATE_H #define CPU_QUANTUM_GATE_H #include "Core/VirtualQuantumProcessor/QPUImpl.h" #include "Core/Utilities/Tools/Utils.h" #include <stdio.h> #include <iostream> #include <vector> #ifndef SQ2 #define SQ2 (1 / 1.4142135623731) #endif #ifndef PI #define PI 3.14159265358979323846 #endif #define DECL_GATE_MATRIX(NAME)\ extern const qcomplex_t NAME##00;\ extern const qcomplex_t NAME##01;\ extern const qcomplex_t NAME##10;\ extern const qcomplex_t NAME##11; #define DECL_ANGLE_GATE_MATRIX(NAME)\ extern const double NAME##_Nx;\ extern const double NAME##_Ny;\ extern const double NAME##_Nz;\ #define REGISTER_GATE_MATRIX(NAME,U00,U01,U10,U11)\ extern const qcomplex_t NAME##00 = U00;\ extern const qcomplex_t NAME##01 = U01;\ extern const qcomplex_t NAME##10 = U10;\ extern const qcomplex_t NAME##11 = U11; #define REGISTER_ANGLE_GATE_MATRIX(NAME,Nx,Ny,Nz)\ extern const double NAME##_Nx = Nx;\ extern const double NAME##_Ny = Ny;\ extern const double NAME##_Nz = Nz;\ #define CONST_GATE(NAME) \ QError \ NAME(size_t qn, bool isConjugate, double error_rate)\ { \ const_single_qubit_gate(NAME, qn,isConjugate,error_rate);\ return qErrorNone; \ } #define CONTROL_CONST_GATE(NAME) \ QError \ NAME(size_t qn, Qnum& vControlBit,bool isConjugate , double error_rate)\ { \ control_const_single_qubit_gate(NAME, qn,vControlBit,isConjugate,error_rate);\ return qErrorNone; \ } #define 
SINGLE_ANGLE_GATE(NAME) \ QError \ NAME(size_t qn,double theta,bool isConjugate, double error_rate)\ { \ single_qubit_angle_gate(NAME, qn,theta,isConjugate,error_rate);\ return qErrorNone; \ } #define CONTROL_SINGLE_ANGLE_GATE(NAME) \ QError \ NAME(size_t qn, double theta,Qnum& vControlBit,bool isConjugate, double error_rate)\ { \ control_single_qubit_angle_gate(NAME, qn, theta,vControlBit,isConjugate, error_rate); \ return qErrorNone; \ } #define const_single_qubit_gate(GATE_NAME,qn,isConjugate,error_rate) \ single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>(qn,isConjugate,error_rate) #define control_const_single_qubit_gate(GATE_NAME,qn,vControlBit,isConjugate,error_rate) \ control_single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>\ (qn,vControlBit,isConjugate,error_rate) #define single_qubit_angle_gate(GATE_NAME,qn,theta,isConjugate,error_rate) \ single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz>(qn,theta,isConjugate,error_rate) #define control_single_qubit_angle_gate(GATE_NAME,qn,theta,vControlBit,isConjugate,error_rate) \ control_single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz> \ (qn,theta,vControlBit,isConjugate,error_rate) DECL_GATE_MATRIX(Hadamard) DECL_GATE_MATRIX(X) DECL_GATE_MATRIX(Y) DECL_GATE_MATRIX(Z) DECL_GATE_MATRIX(T) DECL_GATE_MATRIX(S) DECL_GATE_MATRIX(P0) DECL_GATE_MATRIX(P1) DECL_ANGLE_GATE_MATRIX(RX_GATE) DECL_ANGLE_GATE_MATRIX(RY_GATE) DECL_ANGLE_GATE_MATRIX(RZ_GATE) /** * @brief QPU implementation by CPU model * @ingroup VirtualQuantumProcessor */ class CPUImplQPU : public QPUImpl { public: vQParam qubit2stat; QGateParam & findgroup(size_t qn); CPUImplQPU(); CPUImplQPU(size_t); ~CPUImplQPU(); inline bool TensorProduct(QGateParam& qgroup0, QGateParam& qgroup1) { if (qgroup0.qVec[0] == qgroup1.qVec[0]) { return false; } size_t length = qgroup0.qstate.size(); size_t slabel = qgroup0.qVec[0]; for (auto iter0 = qgroup1.qstate.begin(); iter0 != qgroup1.qstate.end(); iter0++) { for 
(auto i = 0; i < length; i++) { //*iter1 *= *iter; qgroup0.qstate.push_back(qgroup0.qstate[i] * (*iter0)); } } qgroup0.qstate.erase(qgroup0.qstate.begin(), qgroup0.qstate.begin() + length); qgroup0.qVec.insert(qgroup0.qVec.end(), qgroup1.qVec.begin(), qgroup1.qVec.end()); qgroup1.enable = false; return true; } template<const qcomplex_t& U00, const qcomplex_t& U01, const qcomplex_t& U10, const qcomplex_t& U11> QError single_gate(size_t qn, bool isConjugate, double error_rate) { qcomplex_t alpha; qcomplex_t beta; QGateParam& qgroup = findgroup(qn); size_t j; size_t ststep = 1ull << find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin(); qcomplex_t C00 = U00; qcomplex_t C01 = U01; qcomplex_t C10 = U10; qcomplex_t C11 = U11; if (isConjugate) { qcomplex_t temp; C00 = qcomplex_t(C00.real(), -C00.imag()); C01 = qcomplex_t(C01.real(), -C01.imag()); C10 = qcomplex_t(C10.real(), -C10.imag()); C11 = qcomplex_t(C11.real(), -C11.imag()); temp = C01;; C01 = U10; C10 = temp; } //#pragma omp parallel for private(j,alpha,beta) for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2) { for (j = i; j<i + ststep; j++) { alpha = qgroup.qstate[j]; beta = qgroup.qstate[j + ststep]; qgroup.qstate[j] = C00 * alpha + C01 * beta; /* in j,the goal qubit is in |0> */ qgroup.qstate[j + ststep] = C10 * alpha + C11 * beta; /* in j+ststep,the goal qubit is in |1> */ } } return qErrorNone; } QError U1_GATE(size_t qn, double theta,bool isConjugate,double error_rate) { QGateParam& qgroup = findgroup(qn); size_t ststep = 1ull << find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin(); qcomplex_t C00 = (1,0); qcomplex_t C01 = (0,0); qcomplex_t C10 = (0,0); qcomplex_t C11 = isConjugate? 
qcomplex_t(cos(-theta), sin(-theta)) :qcomplex_t(cos(theta),sin(theta)); for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2) { for (size_t j = i; j < i + ststep; ++j) { qgroup.qstate[j + ststep] = C11 * qgroup.qstate[j + ststep]; } } return qErrorNone; } template<const double& Nx, const double& Ny, const double& Nz> QError single_angle_gate(size_t qn, double theta, bool isConjugate, double error_rate) { qcomplex_t alpha; qcomplex_t beta; qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz); qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx); qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx); qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz); if (isConjugate) { qcomplex_t temp; U00 = qcomplex_t(U00.real(), -U00.imag()); U01 = qcomplex_t(U01.real(), -U01.imag()); U10 = qcomplex_t(U10.real(), -U10.imag()); U11 = qcomplex_t(U11.real(), -U11.imag()); temp = U01; U01 = U10; U10 = temp; } QGateParam& qgroup = findgroup(qn); size_t j; size_t ststep = 1ull << find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin(); //#pragma omp parallel for private(j,alpha,beta) for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2) { for (j = i; j<i + ststep; j++) { alpha = qgroup.qstate[j]; beta = qgroup.qstate[j + ststep]; qgroup.qstate[j] = U00 * alpha + U01 * beta; /* in j,the goal qubit is in |0> */ qgroup.qstate[j + ststep] = U10 * alpha + U11 * beta; /* in j+ststep,the goal qubit is in |1> */ } } return qErrorNone; } template<const double& Nx, const double& Ny, const double& Nz> QError control_single_angle_gate(size_t qn, double theta, Qnum vControlBit, bool isConjugate, double error_rate) { if (QPanda::RandomNumberGenerator() > error_rate) { QGateParam& qgroup0 = findgroup(qn); for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++) { TensorProduct(qgroup0, findgroup(*iter)); } size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size()); size_t x; size_t n = qgroup0.qVec.size(); size_t ststep = 1ull << 
(find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn) - qgroup0.qVec.begin()); size_t index = 0; size_t block = 0; qcomplex_t alpha, beta; qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz); qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx); qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx); qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz); if (isConjugate) { qcomplex_t temp; U00 = qcomplex_t(U00.real(), -U00.imag()); U01 = qcomplex_t(U01.real(), -U01.imag()); U10 = qcomplex_t(U10.real(), -U10.imag()); U11 = qcomplex_t(U11.real(), -U11.imag()); temp = U01; U01 = U10; U10 = temp; } Qnum qvtemp; for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++) { size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter) - qgroup0.qVec.begin()); block += 1ull << stemp; qvtemp.push_back(stemp); } sort(qvtemp.begin(), qvtemp.end()); Qnum::iterator qiter; size_t j; //#pragma omp parallel for private(j,alpha,beta,index,x,qiter) for (size_t i = 0; i < M; i++) { index = 0; x = i; qiter = qvtemp.begin(); for (j = 0; j < n; j++) { while (qiter != qvtemp.end() && *qiter == j) { qiter++; j++; } //index += ((x % 2)*(1ull << j)); index += ((x & 1) << j); x >>= 1; } /* * control qubits are 1,target qubit is 0 */ index = index + block - ststep; alpha = qgroup0.qstate[index]; beta = qgroup0.qstate[index + ststep]; qgroup0.qstate[index] = alpha * U00 + beta * U01; qgroup0.qstate[index + ststep] = alpha * U10 + beta * U11; } } return qErrorNone; } template<const qcomplex_t& U00, const qcomplex_t& U01, const qcomplex_t& U10, const qcomplex_t& U11> QError control_single_gate( size_t qn, Qnum vControlBit, bool isConjugate, double error_rate) { if (QPanda::RandomNumberGenerator() > error_rate) { QGateParam& qgroup0 = findgroup(qn); for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++) { TensorProduct(qgroup0, findgroup(*iter)); } size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size()); size_t x; size_t n = qgroup0.qVec.size(); size_t 
ststep = 1ull << (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn) - qgroup0.qVec.begin()); size_t index = 0; size_t block = 0; qcomplex_t alpha, beta; qcomplex_t C00 = U00; qcomplex_t C01 = U01; qcomplex_t C10 = U10; qcomplex_t C11 = U11; if (isConjugate) { qcomplex_t temp; C00 = qcomplex_t(C00.real(), -C00.imag()); C01 = qcomplex_t(C01.real(), -C01.imag()); C10 = qcomplex_t(C10.real(), -C10.imag()); C11 = qcomplex_t(C11.real(), -C11.imag()); temp = C01; C01 = U10; C10 = temp; } Qnum qvtemp; for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++) { size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter) - qgroup0.qVec.begin()); block += 1ull << stemp; qvtemp.push_back(stemp); } sort(qvtemp.begin(), qvtemp.end()); Qnum::iterator qiter; size_t j; //#pragma omp parallel for private(j,alpha,beta,index,x,qiter) for (size_t i = 0; i < M; i++) { index = 0; x = i; qiter = qvtemp.begin(); for (j = 0; j < n; j++) { while (qiter != qvtemp.end() && *qiter == j) { qiter++; j++; } //index += ((x % 2)*(1ull << j)); index += ((x & 1) << j); x >>= 1; } /* * control qubits are 1,target qubit is 0 */ index = index + block - ststep; alpha = qgroup0.qstate[index]; beta = qgroup0.qstate[index + ststep]; qgroup0.qstate[index] = alpha * C00 + beta * C01; qgroup0.qstate[index + ststep] = alpha * C10 + beta * C11; } } return qErrorNone; } //single qubit gate and control-single qubit gate CONST_GATE(P0); CONST_GATE(P1); CONST_GATE(X); CONST_GATE(Y); CONST_GATE(Z); CONST_GATE(Hadamard); CONST_GATE(T); CONST_GATE(S); SINGLE_ANGLE_GATE(RX_GATE); SINGLE_ANGLE_GATE(RY_GATE); SINGLE_ANGLE_GATE(RZ_GATE); CONTROL_SINGLE_ANGLE_GATE(RX_GATE); CONTROL_SINGLE_ANGLE_GATE(RY_GATE); CONTROL_SINGLE_ANGLE_GATE(RZ_GATE); CONTROL_CONST_GATE(Hadamard); CONTROL_CONST_GATE(X); //CCCC-NOT CONTROL_CONST_GATE(Y); CONTROL_CONST_GATE(Z); CONTROL_CONST_GATE(T); CONTROL_CONST_GATE(S); CONTROL_CONST_GATE(P0); CONTROL_CONST_GATE(P1); //define const CNOT,CZ,ISWAP,SQISWAP inline QError 
CNOT(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { Qnum qvtemp; qvtemp.push_back(qn_0); qvtemp.push_back(qn_1); X(qn_1, qvtemp, isConjugate, error_rate); //qn_1 is target return qErrorNone; } inline QError CNOT(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate) { X(qn_1, vControlBit, isConjugate, error_rate); //qn_1 is target return qErrorNone; } QError iSWAP(size_t qn_0, size_t qn_1, double theta, bool isConjugate, double); QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit, double theta, bool isConjugate, double); inline QError iSWAP(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { iSWAP(qn_0, qn_1, PI / 2, isConjugate, error_rate); return qErrorNone; } inline QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate) { iSWAP(qn_0, qn_1, vControlBit, PI / 2, isConjugate, error_rate); return qErrorNone; } inline QError SqiSWAP(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { iSWAP(qn_0, qn_1, PI / 4, isConjugate, error_rate); return qErrorNone; } inline QError SqiSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate) { iSWAP(qn_0, qn_1, vControlBit, PI / 4, isConjugate, error_rate); return qErrorNone; } QError CR(size_t qn_0, size_t qn_1, double theta, bool isConjugate, double error_rate); QError CR(size_t qn_0, size_t qn_1, Qnum& vControlBit, double theta, bool isConjugate, double error_rate); inline QError CZ(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { CR(qn_0, qn_1, PI, isConjugate, error_rate); return qErrorNone; } inline QError CZ(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate) { CR(qn_0, qn_1, vControlBit, PI, isConjugate, error_rate); return qErrorNone; } //define unitary single/double quantum gate QError unitarySingleQubitGate(size_t qn, QStat& matrix, bool isConjugate, GateType); QError controlunitarySingleQubitGate(size_t qn, Qnum& vControlBit, 
QStat& matrix, bool isConjugate, GateType); QError unitaryDoubleQubitGate(size_t qn_0, size_t qn_1, QStat& matrix, bool isConjugate, GateType); QError controlunitaryDoubleQubitGate(size_t qn_0, size_t qn_1, Qnum& vControlBit, QStat& matrix, bool isConjugate, GateType); QError DiagonalGate(Qnum& vQubit, QStat & matrix, bool isConjugate, double error_rate); QError controlDiagonalGate(Qnum& vQubit, QStat & matrix, Qnum& vControlBit, bool isConjugate, double error_rate); QStat getQState(); QError Reset(size_t qn); bool qubitMeasure(size_t qn); QError pMeasure(Qnum& qnum, prob_tuple &mResult, int select_max=-1); QError pMeasure(Qnum& qnum, prob_vec &mResult); QError initState(size_t head_rank, size_t rank_size, size_t qubit_num); inline QError P00(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { QStat P00_matrix = { 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,0 }; return unitaryDoubleQubitGate(qn_0, qn_1, P00_matrix, isConjugate,GateType::P00_GATE); } inline QError P11(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate) { QStat P11_matrix = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1 }; return unitaryDoubleQubitGate(qn_0, qn_1, P11_matrix, isConjugate,GateType::P11_GATE); } }; class CPUImplQPUWithOracle : public CPUImplQPU { public: QError controlOracularGate(std::vector<size_t> bits, std::vector<size_t> controlbits, bool is_dagger, std::string name); }; #endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-508,512)),ceild(32*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(8*t1+Nx+13,512)),floord(16*t2+Nx+12,512)),floord(32*t3+Nx+28,512)),floord(16*t1-16*t2+Nz+Nx+11,512));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),512*t4+510),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) 
+ 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
Par-16-ParForLoopBarrier.c
/*
 * Triple each element of a small array inside an OpenMP parallel region.
 * The worksharing loop hands each thread a disjoint subset of the four
 * iterations. NOTE(review): the explicit `omp barrier` is redundant — a
 * worksharing `for` without `nowait` already ends with an implicit barrier
 * — but it is kept to preserve the original region structure exactly.
 */
int main(int argc, char **argv)
{
    int values[4] = {1, 2, 3, 4};

    #pragma omp parallel
    {
        #pragma omp for
        for (int idx = 0; idx < 4; ++idx)
        {
            values[idx] *= 3;
        }
        #pragma omp barrier
    }

    return 0;
}
2866.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute schedule(dynamic, 16) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute schedule(dynamic, 16) for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute schedule(dynamic, 16) for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
GB_unop__minv_int32_int32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__minv_int32_int32)
// op(A') function: GB (_unop_tran__minv_int32_int32)

// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
// (GB_IMINV_SIGNED is the integer "multiplicative inverse" macro declared in
// GB.h -- see that header for its exact behavior on 0 and non-unit inputs.)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = aij ; \
    Cx [pC] = GB_IMINV_SIGNED (z, 32) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_IMINV_SIGNED (aij, 32) elementwise over anz entries, in
// parallel across nthreads.  The bitmap (Ab != NULL) branch skips entries
// whose bitmap flag is zero.

GrB_Info GB (_unop_apply__minv_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 32) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands the
// GB_CAST_OP macro defined above.

GrB_Info GB (_unop_tran__minv_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <nnvm/graph.h> #include <nnvm/node.h> #include <mxnet/imperative.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/storage.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> #include <limits> #include "../operator/mxnet_op.h" #if MXNET_USE_ONEDNN == 1 #include "../operator/nn/mkldnn/mkldnn_base-inl.h" #endif #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) #include <windows.h> #else #include <unistd.h> #endif namespace mxnet { namespace common { #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) inline size_t current_process_id() { return ::GetCurrentProcessId(); } #else inline size_t current_process_id() { return getpid(); } #endif /*! 
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0 * and end with value equal with size of indices. */ struct csr_indptr_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size) { if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] || (i == 0 && indptr[i] != 0) || (i == end - 1 && indptr[end] != idx_size)) *out = kCSRIndPtrErr; } }; /*! * \brief Indices should be non-negative, less than the number of columns * and in ascending order per row. */ struct csr_idx_check { template<typename DType, typename IType, typename RType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const RType* indptr, const nnvm::dim_t ncols) { for (RType j = indptr[i]; j < indptr[i+1]; j++) { if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) { *out = kCSRIdxErr; break; } } } }; /*! * \brief Indices of RSPNDArray should be non-negative, * less than the size of first dimension and in ascending order */ struct rsp_idx_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const nnvm::dim_t end, const nnvm::dim_t nrows) { if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows) *out = kRSPIdxErr; } }; template<typename xpu> void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check); /*! * \brief Check the validity of CSRNDArray. * \param rctx Execution context. * \param input Input NDArray of CSRStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
*/ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const mxnet::TShape shape = input.shape(); const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx); const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr); const mxnet::TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. 
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. 
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! 
\brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } inline std::string attr_value_string(const nnvm::NodeAttrs& attrs, const std::string& attr_name, std::string default_val = "") { if (attrs.dict.find(attr_name) == attrs.dict.end()) { return default_val; } return attrs.dict.at(attr_name); } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! 
\brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. */ inline void LogOnce(const std::string& message) { typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore; auto log_store = LogStore::Get(); if (log_store->find(message) == log_store->end()) { LOG(INFO) << message; log_store->insert(message); } } /*! \brief log storage fallback event */ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>* in_attrs, const std::vector<int>* out_attrs) { static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true); if (!log) return; const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs); std::ostringstream os; const char* warning = "\nThe operator with default storage type will be dispatched " "for execution. You're seeing this warning message because the operator above is unable " "to process the given ndarrays with specified storage types, context and parameter. " "Temporary dense ndarrays are generated in order to execute the operator. " "This does not affect the correctness of the programme. 
" "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to " "0 to suppress this warning."; os << "\nStorage type fallback detected:\n" << op_str << warning; LogOnce(os.str()); #if MXNET_USE_ONEDNN == 1 if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. " "You can re-enable by setting MXNET_MKLDNN_ENABLED=1"); if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set." "Should only be set if " "your model has variable input shapes, " "as cache size may grow unbounded"); #endif } // heuristic to dermine number of threads per GPU inline int GetNumThreadsPerGPU() { // This is resource efficient option. return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2); } // heuristic to get number of matching colors. // this decides how much parallelism we can get in each GPU. inline int GetExecNumMatchColor() { // This is resource efficient option. int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1); return std::min(num_match_color, GetNumThreadsPerGPU()); } template<typename T, typename V> V ParallelAccumulate(const T* a, const int n, V start) { V sum = start; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < n; ++i) { sum += a[i]; } return sum; } /*! * \brief * Helper function for ParallelSort. * DO NOT call this function directly. * Use the interface ParallelSort instead. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) { if (len < grainsize) { std::sort(first, first+len, comp); } else { std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp); ParallelSortHelper(first+len/2, len - len/2, grainsize, comp); thr.join(); std::inplace_merge(first, first+len/2, first+len, comp); } } /*! * \brief * Sort the elements in the range [first, last) into the ascending order defined by * the comparator comp. 
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size bounds the thread-spawning recursion: each task handles at
  // least 16K elements (or num/num_threads + 5, whichever is larger).
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject
MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound
MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // `{}` value-initializes, so the array elements are zeroed.
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructing an array of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound
MakeUnique(Args&&... args) = delete;

/*! \brief Look up the FCompute-style function registered under `name` for
 *         `op` on the device class of `ctx` (CPU or GPU); returns nullptr
 *         when the operator has no such registration.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // For floating-point types this is 2^digits (digits includes the implicit
  // significand bit).
  return std::is_integral<T>::value ?
         std::numeric_limits<T>::max():
         size_t(2) << (std::numeric_limits<T>::digits - 1);
}

template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;  // 2^11: half has an 11-bit significand (incl. implicit bit)
}

template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  // NOTE(review): 2^15, although bfloat16 has an 8-bit significand — confirm
  // this constant is intentional before relying on it.
  return size_t(2) << 14;
}

// Returns floor(log2(a)) + 1 for a > 0 (i.e. the bit width of a);
// note it returns 1, not 0, for a == 0.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

// Returns floor(log2(a)) + 1 for a > 0 (i.e. the bit width of a);
// note it returns 1, not 0, for a == 0.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}

/*!
 * \brief parallelize copy by OpenMP.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Only spin up OpenMP threads for copies large enough to amortize the cost;
  // smaller copies fall through to a plain memcpy.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}

/*!
 * \brief parallelize add by OpenMP
 */
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  // Only spin up OpenMP threads for additions large enough to amortize the
  // cost; smaller inputs use a plain serial loop.
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}

/*!
 * \brief If numpy compatibility is turned off (default), the shapes passed in
 * by users follow the legacy shape definition:
 * 1. 0 ndim means the shape is completely unknown.
 * 2. 0 dim size means the dim size is unknown.
 * We need to convert those shapes to use the numpy shape definition:
 * 1. 0 ndim means it's a scalar tensor.
 * 2. -1 ndim means the shape is unknown.
 * 3. 0 dim size means no elements in that dimension.
 * 4. -1 dim size means the dimension's size is unknown.
 * so that operator's infer shape function can work in backend.
 * \param shape to be converted.
 * Note: It is possible that the shape to be converted is already
 * numpy compatible. For example, when a subgraph operator's infer
 * shape function is called from the infer shape pass of the whole
 * graph, its input/output shapes have been converted to numpy
 * compatible shapes.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
    *shape = mxnet::TShape();  // unknown shape ndim = -1
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
        (*shape)[j] = -1;  // unknown dim size = -1
      }
    }
  }
}

// Convert every shape in the vector in place; see the overload above.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToNumpyShape(&(shapes->at(i)));
  }
}

/*!
 * \brief This function is used to convert shapes returned by
 * the infer shape functions/pass to the legacy shape definition.
*/ inline void ConvertToLegacyShape(mxnet::TShape* shape) { if (!mxnet::ndim_is_known(*shape)) { *shape = mxnet::TShape(0, -1); } else { for (int j = 0; j < shape->ndim(); ++j) { if (!mxnet::dim_size_is_known(*shape, j)) { (*shape)[j] = 0; } } } } inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) { for (size_t i = 0; i < shapes->size(); ++i) { ConvertToLegacyShape(&(shapes->at(i))); } } void ExecuteMonInputCallback( const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays, size_t nid, const std::function<void(const char *, const char *, void *)> &monitor_callback); void ExecuteMonOutputCallback( const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays, size_t nid, const std::function<void(const char *, const char *, void *)> &monitor_callback); inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) { // convert negative axes to positive values const int ndim = src.ndim(); mxnet::TShape axes = src; for (int i = 0; i < ndim; ++i) { if (axes[i] < 0) { axes[i] += ndim; } CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")"; } return axes; } inline bool is_float(const int dtype) { return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16; } inline bool is_int(const int dtype) { return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kInt32 || dtype == mshadow::kInt64; } inline int get_more_precise_type(const int type1, const int type2) { if (type1 == type2) return type1; if (is_float(type1) && is_float(type2)) { if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) { return mshadow::kFloat64; } if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) { return mshadow::kFloat32; } return mshadow::kFloat16; } else if (is_float(type1) || is_float(type2)) { return is_float(type1) ? 
type1 : type2; } if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) { return mshadow::kInt64; } if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) { return mshadow::kInt32; } CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8))) << "1 is UInt8 and 1 is Int8 should not get here"; if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) { return mshadow::kUint8; } return mshadow::kInt8; } inline int np_binary_out_infer_type(const int type1, const int type2) { if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) { return mshadow::kInt32; } return get_more_precise_type(type1, type2); } inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) { // obtain the profiler scope name, if assigned previously std::string profiler_scope = MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR; const std::unordered_map<std::string, std::string>& node_attrs_dict = attrs.dict; const std::unordered_map<std::string, std::string>::const_iterator profiler_scope_iter = node_attrs_dict.find("__profiler_scope__"); if (profiler_scope_iter != node_attrs_dict.end()) { profiler_scope = profiler_scope_iter->second; } return profiler_scope; } inline int GetDefaultDtype() { return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32; } inline int GetDefaultDtype(int dtype) { if (dtype != -1) return dtype; return Imperative::Get()->is_np_default_dtype() ? 
mshadow::kFloat64 : mshadow::kFloat32; } struct MShadowTypeInfo { std::string name; int size; int acc_size; MShadowTypeInfo(const std::string name, const int size, const int acc_size) : name(std::move(name)), size(size), acc_size(acc_size) {} MShadowTypeInfo(const std::string name, const int size) : MShadowTypeInfo(name, size, size) {} }; MShadowTypeInfo mshadow_type_info(const int type_flag); inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) { #if _MSC_VER *ptr = _aligned_malloc(size, alignment); if (*ptr == nullptr) return false; #else int res = posix_memalign(ptr, alignment, size); if (res != 0) return false; #endif return true; } inline void AlignedMemFree(void* ptr) { #if _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
kmeans.c
/** @file kmeans.c ** @brief K-means - Declaration ** @author Andrea Vedaldi, David Novotny **/ /* Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson. Copyright (C) 2013 Andrea Vedaldi and David Novotny. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page kmeans K-means clustering @author Andrea Vedaldi @author David Novotny @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @ref kmeans.h implements a number of algorithm for **K-means quantization**: Lloyd @cite{lloyd82least}, an accelerated version by Elkan @cite{elkan03using}, and a large scale algorithm based on Approximate Nearest Neighbors (ANN). All algorithms support @c float or @c double data and can use the $l^1$ or the $l^2$ distance for clustering. Furthermore, all algorithms can take advantage of multiple CPU cores. Please see @subpage kmeans-fundamentals for a technical description of K-means and of the algorithms implemented here. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-starting Getting started <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The goal of K-means is to partition a dataset into $K$ &ldquo;compact&rdquo; clusters. 
The following example demonstrates using @ref kmeans.h in the C programming
language to partition @c numData @c float vectors into @c numCenters
clusters using Lloyd's algorithm:

@code
#include <vl/kmeans.h>
double energy ;
double * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VLDistanceL2) ;
The following initialization methods are supported: Method | Function | Description ---------------|-----------------------------------------|----------------------------------------------- Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only) See @ref kmeans-init for further details. The initialization methods use a randomized selection of the data points; the random number generator init is controlled by ::vl_rand_init. The second important choice is the **optimization algorithm**. The following optimization algorithms are supported: Algorithm | Symbol | See | Description ------------|------------------|-------------------|----------------------------------------------- Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors See the relative sections for further details. These algorithm are iterative, and stop when either a **maximum number of iterations** (::vl_kmeans_set_max_num_iterations) is reached, or when the energy changes sufficiently slowly in one iteration (::vl_kmeans). All the three algorithms support multithreaded computations. The number of threads used is usually controlled globally by ::vl_set_num_threads. 
**/ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page kmeans-fundamentals K-means fundamentals @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means is find $K$ `centers` $\bc_1,\dots,\bc_m \in \real^d$ and `assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the centers such that the sum of distances \[ E(\bc_1,\dots,\bc_k,q_1,\dots,q_n) = \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p \] is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm), because in this case the optimal centers are the means of the input vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm) will also be considered. Up to normalization, the K-means objective $E$ is also the average reconstruction error if the original points are approximated with the cluster centers. Thus K-means is used not only to group the input points into cluster, but also to `quantize` their values. K-means is widely used in computer vision, for example in the construction of vocabularies of visual features (visual words). In these applications the number $n$ of points to cluster and/or the number $K$ of clusters is often large. Unfortunately, minimizing the objective $E$ is in general a difficult combinatorial problem, so locally optimal or approximated solutions are sought instead. The basic K-means algorithm alternate between re-estimating the centers and the assignments (@ref kmeans-lloyd). Combined with a good initialization strategy (@ref kmeans-init) and, potentially, by re-running the optimization from a number of randomized starting states, this algorithm may attain satisfactory solutions in practice. However, despite its simplicity, Lloyd's algorithm is often too slow. A good replacement is Elkan's algorithm (@ref kmeans-elkan), which uses the triangular inequality to cut down significantly the cost of Lloyd's algorithm. 
Since this algorithm is otherwise equivalent, it should often be preferred. For very large problems (millions of point to clusters and hundreds, thousands, or more clusters to find), even Elkan's algorithm is not sufficiently fast. In these cases, one can resort to a variant of Lloyd's algorithm that uses an approximated nearest neighbors routine (@ref kmeans-ann). <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-init Initialization methods <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> All the $K$-means algorithms considered here find locally optimal solutions; as such the way they are initialized is important. @ref kmeans.h supports the following initialization algorithms: @par Random data samples The simplest initialization method is to sample $K$ points at random from the input data and use them as initial values for the cluster centers. @par K-means++ @cite{arthur07k-means} proposes a randomized initialization of the centers which improves upon random selection. The first center $\bc_1$ is selected at random from the data points $\bx_1, \dots, \bx_n $ and the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is computed. Then the second center $\bc_2$ is selected at random from the data points with probability proportional to the distance. The procedure is repeated to obtain the other centers by using the minimum distance to the centers collected so far. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-lloyd Lloyd's algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The most common K-means method is Lloyd's algorithm @cite{lloyd82least}. This algorithm is based on the observation that, while jointly optimizing clusters and assignment is difficult, optimizing one given the other is easy. Lloyd's algorithm alternates the steps: 1. **Quantization.** Each point $\bx_i$ is reassigned to the center $\bc_{q_j}$ closer to it. 
This requires finding for each point the closest among $K$ other points, which is potentially slow. 2. **Center estimation.** Each center $\bc_q$ is updated to minimize its average distances to the points assigned to it. It is easy to show that the best center is the mean or median of the points, respectively if the $l^2$ or $l^1$ norm is considered. A naive implementation of the assignment step requires $O(dnK)$ operations, where $d$ is the dimensionality of the data, $n$ the number of data points, and $K$ the number of centers. Updating the centers is much cheaper: $O(dn)$ operations suffice to compute the $K$ means and a slightly higher cost is required for the medians. Clearly, the bottleneck is the assignment computation, and this is what the other K-means algorithm try to improve. During the iterations, it can happen that a cluster becomes empty. In this case, K-means automatically **&ldquo;restarts&rdquo; the cluster** center by selecting a training point at random. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-elkan Elkan's algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> Elkan's algorithm @cite{elkan03using} is a variation of Lloyd alternate optimization algorithm (@ref kmeans-lloyd) that uses the triangular inequality to avoid many distance calculations when assigning points to clusters. While much faster than Lloyd, Elkan's method uses storage proportional to the umber of clusters by data points, which makes it unpractical for a very large number of clusters. The idea of this algorithm is that, if a center update does not move them much, then most of the point-to-center computations can be avoided when the point-to-center assignments are recomputed. To detect which distances need evaluation, the triangular inequality is used to lower and upper bound distances after a center update. Elkan algorithms uses two key observations. 
First, one has \[ \|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2 \quad\Rightarrow\quad \|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p. \] Thus if the distance between $\bx_i$ and its current center $\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$ to another center $\bc$, then $\bc$ can be skipped when the new assignment for $\bx_i$ is searched. Checking this requires keeping track of all the inter-center distances, but centers are typically a small fraction of the training data, so overall this can be a significant saving. In particular, if this condition is satisfied for all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be skipped completely. Furthermore, the condition can be tested also based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$. Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by \[ \|\bx - \bc\|_p - \|bc - \hat\bc\|_p \leq \|\bx - \hat{\bc}\|_p \leq \|\bx - \hat{\bc}\|_p + \|\bc + \hat{\bc}\|_p. \] This allows to maintain an upper bound on the distance of $\bx_i$ to its current center $\bc_{q_i}$ and a lower bound to any other center $\bc$: @f{align*} UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\ LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p. @f} Thus the K-means algorithm becomes: 1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i -\hat \bc\|_p$ for all points and centers. Find the current assignments $q_i$ and bounds $UB_i$ by finding the closest centers to each point: $UB_i = \min_{\bc} LB_i(\bc)$. 2. **Center estimation.** 1. Recompute all the centers based on the new means; call the updated version $\hat{\bc}$. 2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$ as explained above. 3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the next iteration. 3. **Quantization.** 1. 
Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$ for all centers $\bc \not= \bc_{q_i}$. 2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$: 1. Skip $\bc$ if \[ UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \| \quad\text{or}\quad UB_i \leq LB_i(\bc). \] The first condition reflects the first observation above; the second uses the bounds to decide if $\bc$ can be closer than the current center $\bc_{q_i}$ to the point $\bx_i$. If the center cannot be skipped, continue as follows. 3. Skip $\bc$ if the condition above is satisfied after making the upper bound tight: \[ UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p. \] Note that the latter calculation can be done only once for $\bx_i$. If the center cannot be skipped still, continue as follows. 4. Tighten the lower bound too: \[ LB_i(\bc) = \| \bx_i - \bc \|_p. \] At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i < UB_i$, then the point $\bx_i$ should be reassigned to $\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i = LB_i(\bc)$. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-ann ANN algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The *Approximate Nearest Neighbor* (ANN) K-means algorithm @cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a variant of Lloyd's algorithm (@ref kmeans-lloyd) uses a best-bin-first randomized KD-tree algorithm to approximately (and quickly) find the closest cluster center to each point. The KD-tree implementation is based on @ref kdtree. The algorithm can be summarized as follows: 1. **Quantization.** Each point $\bx_i$ is reassigned to the center $\bc_{q_j}$ closer to it. This starts by indexing the $K$ centers by a KD-tree and then using the latter to quickly find the closest center for every training point. The search is approximated to further improve speed. 
This opens up the possibility that a data point may receive an assignment that is *worse* than the current one. This is avoided by checking that the new assignment estimated by using ANN is an improvement; otherwise the old assignment is kept. 2. **Center estimation.** Each center $\bc_q$ is updated to minimize its average distances to the points assigned to it. It is easy to show that the best center is the mean or median of the points, respectively if the $l^2$ or $l^1$ norm is considered. The key is to trade-off carefully the speedup obtained by using the ANN algorithm and the loss in accuracy when retrieving neighbors. Due to the curse of dimensionality, KD-trees become less effective for higher dimensional data, so that the search cost, which in the best case is logarithmic with this data structure, may become effectively linear. This is somehow mitigated by the fact that new a new KD-tree is computed at each iteration, reducing the likelihood that points may get stuck with sub-optimal assignments. Experiments with the quantization of 128-dimensional SIFT features show that the ANN algorithm may use one quarter of the comparisons of Elkan's while retaining a similar solution accuracy. */ #include "kmeans.h" #include "generic.h" #include "mathop.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /* ================================================================ */ #ifndef VL_KMEANS_INSTANTIATING /** ------------------------------------------------------------------ ** @brief Reset state ** ** The function reset the state of the KMeans object. It deletes ** any stored centers, releasing the corresponding memory. This ** cancels the effect of seeding or setting the centers, but ** does not change the other configuration parameters. 
**/ VL_EXPORT void vl_kmeans_reset (VlKMeans * self) { self->numCenters = 0 ; self->dimension = 0 ; if (self->centers) vl_free(self->centers) ; if (self->centerDistances) vl_free(self->centerDistances) ; self->centers = NULL ; self->centerDistances = NULL ; } /** ------------------------------------------------------------------ ** @brief Create a new KMeans object ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE) ** @param distance distance. ** @return new KMeans object instance. **/ VL_EXPORT VlKMeans * vl_kmeans_new (vl_type dataType, VlVectorComparisonType distance) { VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ; self->algorithm = VlKMeansLloyd ; self->distance = distance ; self->dataType = dataType ; self->verbosity = 0 ; self->maxNumIterations = 100 ; self->numRepetitions = 1 ; self->centers = NULL ; self->centerDistances = NULL ; self->numTrees = 3; self->maxNumComparisons = 100; vl_kmeans_reset (self) ; return self ; } /** ------------------------------------------------------------------ ** @brief Create a new KMeans object by copy ** @param kmeans KMeans object to copy. ** @return new copy. 
**/ VL_EXPORT VlKMeans * vl_kmeans_new_copy (VlKMeans const * kmeans) { VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ; self->algorithm = kmeans->algorithm ; self->distance = kmeans->distance ; self->dataType = kmeans->dataType ; self->verbosity = kmeans->verbosity ; self->maxNumIterations = kmeans->maxNumIterations ; self->numRepetitions = kmeans->numRepetitions ; self->dimension = kmeans->dimension ; self->numCenters = kmeans->numCenters ; self->centers = NULL ; self->centerDistances = NULL ; self->numTrees = kmeans->numTrees; self->maxNumComparisons = kmeans->maxNumComparisons; if (kmeans->centers) { vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ; self->centers = vl_malloc(dataSize) ; memcpy (self->centers, kmeans->centers, dataSize) ; } if (kmeans->centerDistances) { vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ; self->centerDistances = vl_malloc(dataSize) ; memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ; } return self ; } /** ------------------------------------------------------------------ ** @brief Deletes a KMeans object ** @param self KMeans object instance. ** ** The function deletes the KMeans object instance created ** by ::vl_kmeans_new. 
**/

VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
  /* free the stored centers first, then the object itself */
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}

/* an helper structure */
/* Adapter used by the qsort instantiation below: sorts `permutation'
 * indirectly by comparing values of one data dimension, accessed at
 * the given stride. */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;
  void const * data ;
  vl_size stride ;
} VlKMeansSortWrapper ;

/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */

#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"

/* #ifdef VL_KMEANS_INSTANTITATING */
#endif

/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING

/* ---------------------------------------------------------------- */
/* Set centers */
/* ---------------------------------------------------------------- */

/* Store a copy of the dimension x numCenters matrix `centers'.
 * NOTE(review): any previous self->centers pointer is overwritten
 * without being freed -- presumably callers reset the object first ;
 * confirm at the call sites. */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
 TYPE const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  memcpy ((TYPE*)self->centers, centers,
          sizeof(TYPE) * dimension * numCenters) ;
}

/* ---------------------------------------------------------------- */
/* Random seeding */
/* ---------------------------------------------------------------- */

/* Seed the centers by selecting numCenters training points at
 * random, skipping points that duplicate an already accepted center
 * when there are enough points left to afford skipping. */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;

    /* get a random permutation of the data point */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;

    /* i scans the permuted data, k counts accepted centers.
     * NOTE(review): if numData < numCenters, i can run past the end
     * of perm -- callers appear to guarantee numData >= numCenters ;
     * confirm. */
    for (k = 0, i = 0 ; k < numCenters ; ++ i) {
      /* compare the next data point to all centers collected so far
       to detect duplicates (if there are enough left) */
      if (numCenters - k < numData - i) {
        vl_bool duplicateDetected = VL_FALSE ;
        VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances, dimension, data + dimension * perm[i], 1, (TYPE*)self->centers, k, distFn) ;
        for (j = 0 ; j < k ; ++j) {
          duplicateDetected |= (distances[j] == 0) ;
        }
        if (duplicateDetected) continue ;
      }

      /* ok, it is not a duplicate so we can accept it! */
      memcpy ((TYPE*)self->centers + dimension * k,
              data + dimension * perm[i],
              sizeof(TYPE) * dimension) ;
      k ++ ;
    }
    vl_free(distances) ;
    vl_free(perm) ;
  }
}

/* ---------------------------------------------------------------- */
/* kmeans++ seeding */
/* ---------------------------------------------------------------- */

/* Seed the centers with the k-means++ scheme: the first center is a
 * random point ; each subsequent center is a point sampled with
 * probability proportional to its current distance to the nearest
 * accepted center (inverse transform sampling over minDistances). */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex x, c ;
  VlRand * rand = vl_get_rand () ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  for (x = 0 ; x < numData ; ++x) {
    minDistances[x] = (TYPE) VL_INFINITY_D ;
  }

  /* select the first point at random */
  x = vl_rand_uindex (rand, numData) ;
  c = 0 ;
  while (1) {
    TYPE energy = 0 ;
    TYPE acc = 0 ;
    TYPE thresh = (TYPE) vl_rand_real1 (rand) ;

    /* x indexes the point accepted as the c-th center */
    memcpy ((TYPE*)self->centers + c * dimension,
            data + x * dimension,
            sizeof(TYPE) * dimension) ;

    c ++ ;
    if (c == numCenters) break ;

    /* distances from the newly accepted center to all points */
    VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
    (distances,
     dimension,
     (TYPE*)self->centers + (c - 1) * dimension, 1,
     data, numData,
     distFn) ;

    /* refresh each point's distance to its closest center and
     * accumulate the total energy */
    for (x = 0 ; x < numData ; ++x) {
      minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
      energy += minDistances[x] ;
    }

    /* sample the next center: stop at the first point whose
     * cumulative mass reaches thresh * energy */
    for (x = 0 ; x < numData - 1 ; ++x) {
      acc += minDistances[x] ;
      if (acc >= thresh * energy) break ;
    }
  }

  vl_free(distances) ;
  vl_free(minDistances) ;
}

/* ---------------------------------------------------------------- */
/* Quantization */
/* ---------------------------------------------------------------- */

/* Assign each of the numData points to its exact closest center,
 * writing the center index into assignments[i] and, if distances is
 * non-NULL, the corresponding distance into distances[i]. */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData)
{
  vl_index i ;

#ifdef _OPENMP
  vl_size numThreads = vl_get_max_threads() ;
#endif

#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

#ifdef _OPENMP
#pragma omp parallel default(none) \
            shared(self, distances, assignments, numData, distFn, data) \
            num_threads(numThreads)
#endif
  {
    /* vl_malloc cannot be used here if mapped to MATLAB malloc */
    /* per-thread scratch buffer of point-to-center distances */
    TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;

#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0 ; i < (signed)numData ; ++i) {
      vl_uindex k ;
      TYPE bestDistance = (TYPE) VL_INFINITY_D ;
      /* distances from point i to every center */
      VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters, self->dimension, data + self->dimension * i, 1, (TYPE*)self->centers, self->numCenters, distFn) ;
      for (k = 0 ; k < self->numCenters ; ++k) {
        if (distanceToCenters[k] < bestDistance) {
          bestDistance = distanceToCenters[k] ;
          assignments[i] = (vl_uint32)k ;
        }
      }
      if (distances) distances[i] = bestDistance ;
    }
    free(distanceToCenters) ;
  }
}

/* ---------------------------------------------------------------- */
/* ANN quantization */
/*
---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_quantize_ann_, SFX) (VlKMeans * self, vl_uint32 * assignments, TYPE * distances, TYPE const * data, vl_size numData, vl_bool update) { #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ; vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons); vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN); vl_kdforest_build(forest,self->numCenters,self->centers); #ifdef _OPENMP #pragma omp parallel default(none) \ num_threads(vl_get_max_threads()) \ shared(self, forest, update, assignments, distances, data, numData, distFn) #endif { VlKDForestNeighbor neighbor ; VlKDForestSearcher * searcher ; vl_index x; #ifdef _OPENMP #pragma omp critical #endif searcher = vl_kdforest_new_searcher (forest) ; #ifdef _OPENMP #pragma omp for #endif for(x = 0 ; x < (signed)numData ; ++x) { vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension)); if (distances) { if(!update) { distances[x] = (TYPE) neighbor.distance; assignments[x] = (vl_uint32) neighbor.index ; } else { TYPE prevDist = (TYPE) distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension *assignments[x]); if (prevDist > (TYPE) neighbor.distance) { distances[x] = (TYPE) neighbor.distance ; assignments[x] = (vl_uint32) neighbor.index ; } else { distances[x] = prevDist ; } } } else { assignments[x] = (vl_uint32) neighbor.index ; } } /* end for */ } /* end of parallel region */ vl_kdforest_delete(forest); } /* ---------------------------------------------------------------- */ /* Helper functions */ /* ---------------------------------------------------------------- */ /* 
The sorting routine is used to find increasing permutation of each
 * data dimension. This is used to quickly find the median for l1
 * distance clustering. */

/* Comparison callback for the qsort instantiation: orders two
 * permutation entries by the value of the wrapped data dimension. */
VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  return
    ((TYPE*)array->data) [array->permutation[indexA] * array->stride]
    -
    ((TYPE*)array->data) [array->permutation[indexB] * array->stride] ;
}

/* Swap callback for the qsort instantiation: exchanges two
 * permutation entries. */
VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  vl_uint32 tmp = array->permutation[indexA] ;
  array->permutation[indexA] = array->permutation[indexB] ;
  array->permutation[indexB] = tmp ;
}

#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"

/* For each dimension d, fill permutations + d * numData with the
 * permutation sorting the data by increasing value of dimension d
 * (used by the l1 median computation in the refinement loops). */
static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
  vl_uindex d, x ;

  for (d = 0 ; d < self->dimension ; ++d) {
    VlKMeansSortWrapper array ;
    array.permutation = permutations + d * numData ;
    array.data = data + d ;
    array.stride = self->dimension ;
    for (x = 0 ; x < numData ; ++x) {
      array.permutation[x] = (vl_uint32)x ;
    }
    VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ;
  }
}

/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */

/* Run Lloyd iterations (exact quantization followed by a center
 * update) until the energy stops decreasing or maxNumIterations is
 * reached. Empty clusters are restarted from a random data point.
 * Returns the final energy. */
static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double energy ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): clusterMasses is indexed by center but sized by
   * numData -- over-allocated, presumably assuming
   * numData >= numCenters ; confirm. */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;

  /* the l1 median computation needs per-dimension sorted orders */
  if (self->distance == VlDistanceL1) {
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }

  for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) {

    /* assign data to cluters */
    VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;

    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration, energy) ;
    }

    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
      }
      break ;
    }

    /* begin next iteration */
    previousEnergy = energy ;

    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }

    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* the l2-optimal center is the mean of the assigned points */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* the l1-optimal center is the per-dimension median of the
         * assigned points, found by a scan in sorted order */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          /* NOTE(review): this restart loop is nested inside the
           * loop over d, and its inner copy loop reuses d as index,
           * so a restart terminates the per-dimension loop early and
           * restarts can be counted once per dimension. The Elkan
           * variant places the restart loop after the d loop --
           * verify whether this nesting is intended. */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */

    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration, numRestartedCenters) ;
    }
  } /* next Lloyd iteration */

  if (permutations) { vl_free(permutations) ; }
  if (numSeenSoFar) { vl_free(numSeenSoFar) ; }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}

/* Refresh the numCenters x numCenters matrix of inter-center
 * distances (allocated lazily) and return the number of distinct
 * center pairs, used by callers to count distance computations. */
static double
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)
(VlKMeans * self)
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  if (!
self->centerDistances) {
    self->centerDistances = vl_malloc (sizeof(TYPE) *
                                       self->numCenters *
                                       self->numCenters) ;
  }
  VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances,
                                                        self->dimension,
                                                        self->centers, self->numCenters,
                                                        NULL, 0,
                                                        distFn) ;
  return self->numCenters * (self->numCenters - 1) / 2 ;
}

/* Run K-means iterations with approximate (KD-forest) quantization.
 * Same structure as the Lloyd refinement, but the assignments come
 * from _vl_kmeans_quantize_ann_ and the loop also stops as soon as
 * the relative energy improvement falls below 1e-5. Returns the
 * final energy. */
static double
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double energy ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;

  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): indexed by center but sized by numData, as in the
   * Lloyd variant -- presumably assumes numData >= numCenters. */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;

  /* the l1 median computation needs per-dimension sorted orders */
  if (self->distance == VlDistanceL1) {
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }

  for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) {
    double eps;

    /* assign data to cluters */
    /* after iteration 0, the quantizer keeps a point's previous
     * assignment when the ANN candidate would be worse */
    VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ;

    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration, energy) ;
    }

    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ;
      }
      break ;
    }
    /* relative energy improvement of this iteration */
    eps = (previousEnergy - energy)/previousEnergy;
    if (energy == previousEnergy || eps < 0.00001) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ;
      }
      break ;
    }

    /* begin next iteration */
    previousEnergy = energy ;

    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }

    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* the l2-optimal center is the mean of the assigned points */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* the l1-optimal center is the per-dimension median of the
         * assigned points, found by a scan in sorted order */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          /* NOTE(review): as in the Lloyd variant, this restart loop
           * is nested in the loop over d and its inner copy loop
           * reuses d, ending the per-dimension loop early after a
           * restart -- verify intended behavior. */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        VL_PRINT("bad distance set: %d\n",self->distance);
        abort();
    } /* done compute centers */

    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration, numRestartedCenters) ;
    }
  }

  if (permutations) { vl_free(permutations) ; }
  if (numSeenSoFar) { vl_free(numSeenSoFar) ; }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}

/* ---------------------------------------------------------------- */
/* Elkan refinement */
/* ---------------------------------------------------------------- */

/* Elkan's accelerated K-means: maintains per-point upper bounds to
 * the assigned center and lower bounds to every center to skip
 * distance computations (function continues past this chunk). */
static double
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size d, iteration ;
  vl_index x ;
  vl_uint32 c, j ;
  vl_bool allDone ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  VlRand * rand = vl_get_rand () ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  /* bounds and scratch used by Elkan's acceleration */
  TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ;
  vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ;
  TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ;
  TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ;
  TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  double energy ;
  /* statistics on the number of distance computations performed */
  vl_size totDistanceComputationsToInit = 0 ;
  vl_size totDistanceComputationsToRefreshUB = 0 ;
  vl_size totDistanceComputationsToRefreshLB = 0 ;
  vl_size totDistanceComputationsToRefreshCenterDistances = 0 ;
  vl_size totDistanceComputationsToNewCenters = 0 ;
  vl_size totDistanceComputationsToFinalize = 0 ;
  vl_size totNumRestartedCenters = 0 ;
  /* the l1 median computation needs per-dimension sorted orders */
  if (self->distance == VlDistanceL1) {
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) *
self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Initialization */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* An iteration is: get_new_centers + reassign + get_energy. This counts as iteration 0, where get_new_centers is assumed to be performed before calling the train function by the initialization function */ /* update distances between centers */ totDistanceComputationsToInit += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; /* assigmen points to the initial centers and initialize bounds */ memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE distance ; /* do the first center */ assignments[x] = 0 ; distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + 0) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[0 + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; /* do other centers */ for (c = 1 ; c < self->numCenters ; ++c) { /* Can skip if the center assigned so far is twice as close as its distance to the center under consideration */ if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + assignments[x] * self->numCenters]) { continue ; } distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; pointToCenterLB[c + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; if (distance < pointToClosestCenterUB[x]) { pointToClosestCenterUB[x] = distance ; assignments[x] = c ; } } } /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. 
= %d\n", energy, totDistanceComputationsToInit) ; } /* #define SANITY*/ #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after initial assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n", cc, xx, a, b) ; } } } #endif /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Iterations */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ for (iteration = 1 ; 1; ++iteration) { vl_size numDistanceComputationsToRefreshUB = 0 ; vl_size numDistanceComputationsToRefreshLB = 0 ; vl_size numDistanceComputationsToRefreshCenterDistances = 0 ; vl_size numDistanceComputationsToNewCenters = 0 ; vl_size numRestartedCenters = 0 ; /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Compute new centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { clusterMasses[assignments[x]] ++ ; } switch (self->distance) { case VlDistanceL2: memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE * cpt = newCenters + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = newCenters + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { /* restart the center */ vl_uindex x = vl_rand_uindex(rand, numData) ; 
numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < (signed)numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { newCenters [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = newCenters + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; default: abort(); } /* done compute centers */ /* compute the distance from the old centers to the new centers */ for (c = 0 ; c < self->numCenters ; ++c) { TYPE distance = distFn(self->dimension, newCenters + c * self->dimension, (TYPE*)self->centers + c * self->dimension) ; centerToNewCenterDistances[c] = distance ; numDistanceComputationsToNewCenters += 1 ; } /* make the new centers current */ { TYPE * tmp = self->centers ; self->centers = newCenters ; newCenters = tmp ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Reassign points to a centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Update distances between centers. 
*/ numDistanceComputationsToRefreshCenterDistances += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; for (c = 0 ; c < self->numCenters ; ++c) { nextCenterDistances[c] = (TYPE) VL_INFINITY_D ; for (j = 0 ; j < self->numCenters ; ++j) { if (j == c) continue ; nextCenterDistances[c] = VL_MIN(nextCenterDistances[c], ((TYPE*)self->centerDistances) [j + c * self->numCenters]) ; } } /* Update upper bounds on point-to-closest-center distances based on the center variation. */ for (x = 0 ; x < (signed)numData ; ++x) { TYPE a = pointToClosestCenterUB[x] ; TYPE b = centerToNewCenterDistances[assignments[x]] ; if (self->distance == VlDistanceL1) { pointToClosestCenterUB[x] = a + b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ; } pointToClosestCenterUBIsStrict[x] = VL_FALSE ; } /* Update lower bounds on point-to-center distances based on the center variation. */ #if defined(_OPENMP) #pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++x) { for (c = 0 ; c < self->numCenters ; ++c) { TYPE a = pointToCenterLB[c + x * self->numCenters] ; TYPE b = centerToNewCenterDistances[c] ; if (a < b) { pointToCenterLB[c + x * self->numCenters] = 0 ; } else { if (self->distance == VlDistanceL1) { pointToCenterLB[c + x * self->numCenters] = a - b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToCenterLB[c + x * self->numCenters] = a + b - 2.0 * sqrtab ; } } } } #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies before assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == 
assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* Scan the data and do the reassignments. Use the bounds to skip as many point-to-center distance calculations as possible. */ allDone = VL_TRUE ; #if defined(_OPENMP) #pragma omp parallel for \ default(none) \ shared(self,numData, \ pointToClosestCenterUB,pointToCenterLB, \ nextCenterDistances,pointToClosestCenterUBIsStrict, \ assignments,data,distFn,allDone) \ private(c,x) \ reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \ num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++ x) { /* A point x sticks with its current center assignmets[x] the UB to d(x, c[assigmnets[x]]) is not larger than half the distance of c[assigments[x]] to any other center c. */ if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) { continue ; } for (c = 0 ; c < self->numCenters ; ++c) { vl_uint32 cx = assignments[x] ; TYPE distance ; /* The point is not reassigned to a given center c if either: 0 - c is already the assigned center 1 - The UB of d(x, c[assignments[x]]) is smaller than half the distance of c[assigments[x]] to c, OR 2 - The UB of d(x, c[assignmets[x]]) is smaller than the LB of the distance of x to c. */ if (cx == c) { continue ; } if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } /* If the UB is loose, try recomputing it and test again */ if (! 
pointToClosestCenterUBIsStrict[x]) { distance = distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[cx + x * self->numCenters] = distance ; numDistanceComputationsToRefreshUB += 1 ; if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } } /* Now the UB is strict (equal to d(x, assignments[x])), but we still could not exclude that x should be reassigned to c. We therefore compute the distance, update the LB, and check if a reassigmnet must be made */ distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; numDistanceComputationsToRefreshLB += 1 ; pointToCenterLB[c + x * self->numCenters] = distance ; if (distance < pointToClosestCenterUB[x]) { assignments[x] = c ; pointToClosestCenterUB[x] = distance ; allDone = VL_FALSE ; /* the UB strict flag is already set here */ } } /* assign center */ } /* next data point */ totDistanceComputationsToRefreshUB += numDistanceComputationsToRefreshUB ; totDistanceComputationsToRefreshLB += numDistanceComputationsToRefreshLB ; totDistanceComputationsToRefreshCenterDistances += numDistanceComputationsToRefreshCenterDistances ; totDistanceComputationsToNewCenters += numDistanceComputationsToNewCenters ; totNumRestartedCenters += numRestartedCenters ; #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = 
pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { vl_size numDistanceComputations = numDistanceComputationsToRefreshUB + numDistanceComputationsToRefreshLB + numDistanceComputationsToRefreshCenterDistances + numDistanceComputationsToNewCenters ; VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n", iteration, energy, numDistanceComputations) ; if (numRestartedCenters) { VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n", iteration, energy, numRestartedCenters) ; } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: " "UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d)\n", iteration, 100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations, numDistanceComputationsToRefreshUB, 100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations, numDistanceComputationsToRefreshLB, 100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations, numDistanceComputationsToRefreshCenterDistances, 100.0 * numDistanceComputationsToNewCenters / numDistanceComputations, numDistanceComputationsToNewCenters) ; } } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ; } break ; } if (allDone) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ; } break ; } } /* next Elkan iteration */ /* compute true energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++ x) { vl_uindex cx = assignments [x] ; energy += distFn(self->dimension, data + self->dimension * x, 
(TYPE*)self->centers + self->dimension * cx) ; totDistanceComputationsToFinalize += 1 ; } { vl_size totDistanceComputations = totDistanceComputationsToInit + totDistanceComputationsToRefreshUB + totDistanceComputationsToRefreshLB + totDistanceComputationsToRefreshCenterDistances + totDistanceComputationsToNewCenters + totDistanceComputationsToFinalize ; double saving = (double)totDistanceComputations / (iteration * self->numCenters * numData) ; if (self->verbosity) { VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n", totDistanceComputations, saving * 100.0) ; if (totNumRestartedCenters) { VL_PRINTF("kmeans: Elkan: there have been %d restarts\n", totNumRestartedCenters) ; } } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan: total dist. calc. per type: " "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d), " "finalize: %.1f%% (%d)\n", 100.0 * totDistanceComputationsToInit / totDistanceComputations, totDistanceComputationsToInit, 100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations, totDistanceComputationsToRefreshUB, 100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations, totDistanceComputationsToRefreshLB, 100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations, totDistanceComputationsToRefreshCenterDistances, 100.0 * totDistanceComputationsToNewCenters / totDistanceComputations, totDistanceComputationsToNewCenters, 100.0 * totDistanceComputationsToFinalize / totDistanceComputations, totDistanceComputationsToFinalize) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; vl_free(nextCenterDistances) ; vl_free(pointToClosestCenterUB) ; vl_free(pointToClosestCenterUBIsStrict) ; vl_free(pointToCenterLB) ; vl_free(newCenters) ; vl_free(centerToNewCenterDistances) ; return energy ; } /* 
---------------------------------------------------------------- */

/* Dispatch center refinement to the algorithm selected on the KMeans
 * object (Lloyd, Elkan, or ANN).  This is templated code: TYPE/SFX are
 * float/f or double/d depending on the instantiation below. */
static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  switch (self->algorithm) {
    case VlKMeansLloyd:
      return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
      break ;
    case VlKMeansElkan:
      return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
      break ;
    case VlKMeansANN:
      return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
      break ;
    default:
      abort() ;
  }
}

/* VL_KMEANS_INSTANTIATING */
#else

/* Instantiate this file twice, once per data type, by re-including it
 * with TYPE/SFX/FLT defined.  Hidden from Doxygen. */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"

#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif

/* VL_KMEANS_INSTANTIATING */
#endif

/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING

/** ------------------------------------------------------------------
 ** @brief Set centers
 ** @param self KMeans object.
 ** @param centers centers to copy.
 ** @param dimension data dimension.
 ** @param numCenters number of centers.
 **/

VL_EXPORT void
vl_kmeans_set_centers (VlKMeans * self,
                       void const * centers,
                       vl_size dimension,
                       vl_size numCenters)
{
  vl_kmeans_reset (self) ;
  /* dispatch on the object's data type */
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_set_centers_f
      (self, (float const *)centers, dimension, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_set_centers_d
      (self, (double const *)centers, dimension, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Init centers by randomly sampling data
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **
 ** The function inits the KMeans centers by randomly sampling
 ** the data @a data.
 **/

VL_EXPORT void
vl_kmeans_init_centers_with_rand_data (VlKMeans * self,
                                       void const * data,
                                       vl_size dimension,
                                       vl_size numData,
                                       vl_size numCenters)
{
  vl_kmeans_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_with_rand_data_f
      (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_with_rand_data_d
      (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Seed centers by the KMeans++ algorithm
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **/

VL_EXPORT void
vl_kmeans_init_centers_plus_plus (VlKMeans * self,
                                  void const * data,
                                  vl_size dimension,
                                  vl_size numData,
                                  vl_size numCenters)
{
  vl_kmeans_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_plus_plus_f
      (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_plus_plus_d
      (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Quantize data
 ** @param self KMeans object.
 ** @param assignments data to closest center assignments (output).
 ** @param distances data to closest center distance (output).
 ** @param data data to quantize.
 ** @param numData number of data points to quantize.
 **/

VL_EXPORT void
vl_kmeans_quantize (VlKMeans * self,
                    vl_uint32 * assignments,
                    void * distances,
                    void const * data,
                    vl_size numData)
{
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_quantize_f
      (self, assignments, distances, (float const *)data, numData) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_quantize_d
      (self, assignments, distances, (double const *)data, numData) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Quantize data using approximate nearest neighbours (ANN).
 ** @param self KMeans object.
 ** @param assignments data to centers assignments (output).
 ** @param distances data to closest center distance (output)
 ** @param data data to quantize.
 ** @param numData number of data points.
 ** @param update choose whether to update current assignments.
 **
 ** The function uses an ANN procedure to compute the approximate
 ** nearest neighbours of the input data point.
 **
 ** Setting @a update to ::VL_TRUE will cause the algorithm
 ** to *update existing assignments*. This means that each
 ** element of @a assignments and @a distances is updated only if the
 ** ANN procedure can find a better assignment of the existing one.
 **/

VL_EXPORT void
vl_kmeans_quantize_ann (VlKMeans * self,
                        vl_uint32 * assignments,
                        void * distances,
                        void const * data,
                        vl_size numData,
                        vl_bool update)
{
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_quantize_ann_f
      (self, assignments, distances, (float const *)data, numData, update) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_quantize_ann_d
      (self, assignments, distances, (double const *)data, numData, update) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Refine center locations.
 ** @param self KMeans object.
 ** @param data data to quantize.
 ** @param numData number of data points.
 ** @return K-means energy at the end of optimization.
 **
 ** The function calls the underlying K-means quantization algorithm
 ** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
 ** The function assumes that the cluster centers have already
 ** been assigned by using one of the seeding functions, or by
 ** setting them.
 **/

VL_EXPORT double
vl_kmeans_refine_centers (VlKMeans * self,
                          void const * data,
                          vl_size numData)
{
  assert (self->centers) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      return
        _vl_kmeans_refine_centers_f
        (self, (float const *)data, numData) ;
    case VL_TYPE_DOUBLE :
      return
        _vl_kmeans_refine_centers_d
        (self, (double const *)data, numData) ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Cluster data.
 ** @param self KMeans object.
 ** @param data data to quantize.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of clusters.
 ** @return K-means energy at the end of optimization.
 **
 ** The function initializes the centers by using the initialization
 ** algorithm set by ::vl_kmeans_set_initialization and refines them
 ** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
 ** The process is repeated one or more times (see
 ** ::vl_kmeans_set_num_repetitions) and the result with smaller
 ** energy is retained.
 **/

VL_EXPORT double
vl_kmeans_cluster (VlKMeans * self,
                   void const * data,
                   vl_size dimension,
                   vl_size numData,
                   vl_size numCenters)
{
  vl_uindex repetition ;
  double bestEnergy = VL_INFINITY_D ;
  void * bestCenters = NULL ;

  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double energy ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlKMeansRandomSelection :
        vl_kmeans_init_centers_with_rand_data (self,
                                               data, dimension, numData,
                                               numCenters) ;
        break ;
      case VlKMeansPlusPlus :
        vl_kmeans_init_centers_plus_plus (self,
                                          data, dimension, numData,
                                          numCenters) ;
        break ;
      default:
        abort() ;
    }

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    timeRef = vl_get_cpu_time () ;
    energy = vl_kmeans_refine_centers (self, data, numData) ;

    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
                vl_get_cpu_time() - timeRef, energy) ;
    }

    /* copy centers to output if current solution is optimal */
    /* check repetition == 0 as well in case energy = NaN, which */
    /* can happen if the data contain NaNs */
    if (energy < bestEnergy || repetition == 0) {
      void * temp ;
      bestEnergy = energy ;

      if (bestCenters == NULL) {
        bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
                                self->dimension *
                                self->numCenters) ;
      }

      /* swap buffers */
      temp = bestCenters ;
      bestCenters = self->centers ;
      self->centers = temp ;
    } /* better energy */
  } /* next repetition */

  vl_free (self->centers) ;
  self->centers = bestCenters ;
  return bestEnergy ;
}

/* VL_KMEANS_INSTANTIATING */
#endif

#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
/* ==================== real_to_reciprocal.c ==================== */
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "phonoc_array.h"
#include "phonoc_const.h"
#include "real_to_reciprocal.h"
#include "lapack_wrapper.h"

static void real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                             const double q_vecs[3][3],
                                             const double *fc3,
                                             const long is_compact_fc3,
                                             const double (*svecs)[3],
                                             const long multi_dims[2],
                                             const long (*multiplicity)[2],
                                             const long *p2s_map,
                                             const long *s2p_map);
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q_vecs[3][3],
                                      const double *fc3,
                                      const long is_compact_fc3,
                                      const double (*svecs)[3],
                                      const long multi_dims[2],
                                      const long (*multiplicity)[2],
                                      const long *p2s_map,
                                      const long *s2p_map);
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q_vecs[3][3],
                                        const double *fc3,
                                        const long is_compact_fc3,
                                        const double (*svecs)[3],
                                        const long multi_dims[2],
                                        const long (*multiplicity)[2],
                                        const long *p2s,
                                        const long *s2p,
                                        const long pi0,
                                        const long pi1,
                                        const long pi2);
static lapack_complex_double get_phase_factor(const double q[3][3],
                                              const long qi,
                                              const double (*svecs)[3],
                                              const long multi[2]);
static lapack_complex_double get_pre_phase_factor(const long i_patom,
                                                  const double q_vecs[3][3],
                                                  const double (*svecs)[3],
                                                  const long multi_dims[2],
                                                  const long (*multiplicity)[2],
                                                  const long *p2s_map);

/* Fourier-transform the real-space third-order force constants fc3 into
 * reciprocal space at the q-point triplet q_vecs.
 * fc3_reciprocal[num_patom, num_patom, num_patom, 3, 3, 3]
 * openmp_at_bands selects the OpenMP-parallelized driver; otherwise the
 * single-thread driver is used (both produce the same result). */
void r2r_real_to_reciprocal(lapack_complex_double *fc3_reciprocal,
                            const double q_vecs[3][3],
                            const double *fc3,
                            const long is_compact_fc3,
                            const double (*svecs)[3],
                            const long multi_dims[2],
                            const long (*multiplicity)[2],
                            const long *p2s_map,
                            const long *s2p_map,
                            const long openmp_at_bands)
{
  if (openmp_at_bands) {
    real_to_reciprocal_openmp(fc3_reciprocal, q_vecs, fc3, is_compact_fc3,
                              svecs, multi_dims, multiplicity, p2s_map,
                              s2p_map);
  } else {
    real_to_reciprocal_single_thread(fc3_reciprocal, q_vecs, fc3,
                                     is_compact_fc3, svecs, multi_dims,
                                     multiplicity, p2s_map, s2p_map);
  }
}

/* Serial driver: for each triplet of primitive atoms (i, j, k), compute
 * the 27 Cartesian fc3 elements in reciprocal space and scatter them
 * into the flattened (num_band x num_band x num_band) output array,
 * multiplying in the pre-phase factor for atom i. */
static void
real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                 const double q_vecs[3][3],
                                 const double *fc3,
                                 const long is_compact_fc3,
                                 const double (*svecs)[3],
                                 const long multi_dims[2],
                                 const long (*multiplicity)[2],
                                 const long *p2s_map,
                                 const long *s2p_map)
{
  long i, j, k, l, m, n;
  long num_patom, num_band;
  lapack_complex_double pre_phase_factor, fc3_rec_elem[27];

  num_patom = multi_dims[1];
  num_band = num_patom * 3; /* 3 Cartesian components per atom */

  for (i = 0; i < num_patom; i++) {
    pre_phase_factor = get_pre_phase_factor(i, q_vecs, svecs, multi_dims,
                                            multiplicity, p2s_map);
    for (j = 0; j < num_patom; j++) {
      for (k = 0; k < num_patom; k++) {
        real_to_reciprocal_elements(fc3_rec_elem, q_vecs, fc3,
                                    is_compact_fc3, svecs, multi_dims,
                                    multiplicity, p2s_map, s2p_map, i, j, k);
        for (l = 0; l < 3; l++) {
          for (m = 0; m < 3; m++) {
            for (n = 0; n < 3; n++) {
              /* flattened index: (i*3+l, j*3+m, k*3+n) in a
               * num_band^3 row-major array */
              fc3_reciprocal[(i * 3 + l) * num_band * num_band +
                             (j * 3 + m) * num_band + k * 3 + n] =
                  phonoc_complex_prod(fc3_rec_elem[l * 9 + m * 3 + n],
                                      pre_phase_factor);
            }
          }
        }
      }
    }
  }
}

/* OpenMP driver: identical arithmetic to the serial driver, but the
 * (j, k) double loop is collapsed into a single index jk so iterations
 * can be distributed across threads.  Each jk iteration writes a
 * disjoint slice of fc3_reciprocal, so no synchronization is needed. */
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q_vecs[3][3],
                                      const double *fc3,
                                      const long is_compact_fc3,
                                      const double (*svecs)[3],
                                      const long multi_dims[2],
                                      const long (*multiplicity)[2],
                                      const long *p2s_map,
                                      const long *s2p_map)
{
  long i, j, k, l, m, n, jk;
  long num_patom, num_band;
  lapack_complex_double pre_phase_factor, fc3_rec_elem[27];

  num_patom = multi_dims[1];
  num_band = num_patom * 3;

  for (i = 0; i < num_patom; i++) {
    pre_phase_factor = get_pre_phase_factor(i, q_vecs, svecs, multi_dims,
                                            multiplicity, p2s_map);
#ifdef PHPYOPENMP
#pragma omp parallel for private(j, k, l, m, n, fc3_rec_elem)
#endif
    for (jk = 0; jk < num_patom * num_patom; jk++) {
      j = jk / num_patom;
      k = jk % num_patom;
      real_to_reciprocal_elements(fc3_rec_elem, q_vecs, fc3,
                                  is_compact_fc3, svecs, multi_dims,
                                  multiplicity, p2s_map, s2p_map, i, j, k);
      for (l = 0; l < 3; l++) {
        for (m = 0; m < 3; m++) {
          for (n = 0; n < 3; n++) {
            fc3_reciprocal[(i * 3 + l) * num_band * num_band +
                           (j * 3 + m) * num_band + k * 3 + n] =
                phonoc_complex_prod(fc3_rec_elem[l * 9 + m * 3 + n],
                                    pre_phase_factor);
          }
        }
      }
    }
  }
}

/* Accumulate the 27 reciprocal-space fc3 elements for the primitive-atom
 * triplet (pi0, pi1, pi2): sum over all supercell images (j, k) of atoms
 * pi1 and pi2, weighting each real-space fc3 block by the phase factors
 * of q_vecs[1] and q_vecs[2]. */
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q_vecs[3][3],
                                        const double *fc3,
                                        const long is_compact_fc3,
                                        const double (*svecs)[3],
                                        const long multi_dims[2],
                                        const long (*multiplicity)[2],
                                        const long *p2s,
                                        const long *s2p,
                                        const long pi0,
                                        const long pi1,
                                        const long pi2)
{
  long i, j, k, l;
  long num_satom, adrs_shift, adrs_vec1, adrs_vec2;
  lapack_complex_double phase_factor, phase_factor1, phase_factor2;
  double fc3_rec_real[27], fc3_rec_imag[27];

  for (i = 0; i < 27; i++) {
    fc3_rec_real[i] = 0;
    fc3_rec_imag[i] = 0;
  }

  num_satom = multi_dims[0];

  /* First-atom index into fc3: primitive index when fc3 is stored in
   * compact form, otherwise the corresponding supercell index. */
  if (is_compact_fc3) {
    i = pi0;
  } else {
    i = p2s[pi0];
  }

  for (j = 0; j < num_satom; j++) {
    if (s2p[j] != p2s[pi1]) {
      continue; /* j is not an image of primitive atom pi1 */
    }

    adrs_vec1 = j * multi_dims[1] + pi0;
    phase_factor1 =
        get_phase_factor(q_vecs, 1, svecs, multiplicity[adrs_vec1]);
    for (k = 0; k < num_satom; k++) {
      if (s2p[k] != p2s[pi2]) {
        continue; /* k is not an image of primitive atom pi2 */
      }
      adrs_vec2 = k * multi_dims[1] + pi0;
      phase_factor2 =
          get_phase_factor(q_vecs, 2, svecs, multiplicity[adrs_vec2]);
      /* fc3 is stored as [i][j][k][3][3][3] flattened; 27 = 3*3*3 */
      adrs_shift =
          i * 27 * num_satom * num_satom + j * 27 * num_satom + k * 27;
      phase_factor = phonoc_complex_prod(phase_factor1, phase_factor2);
      for (l = 0; l < 27; l++) {
        fc3_rec_real[l] +=
            lapack_complex_double_real(phase_factor) * fc3[adrs_shift + l];
        fc3_rec_imag[l] +=
            lapack_complex_double_imag(phase_factor) * fc3[adrs_shift + l];
      }
    }
  }

  for (i = 0; i < 27; i++) {
    fc3_rec_elem[i] =
        lapack_make_complex_double(fc3_rec_real[i], fc3_rec_imag[i]);
  }
}

/* Phase factor exp(i (q0+q1+q2) . r) for primitive atom i_patom, averaged
 * over its equivalent shortest-vector images (multiplicity). */
static lapack_complex_double get_pre_phase_factor(const long i_patom,
                                                  const double q_vecs[3][3],
                                                  const double (*svecs)[3],
                                                  const long multi_dims[2],
                                                  const long (*multiplicity)[2],
                                                  const long *p2s_map)
{
  long i, j, svecs_adrs;
  double pre_phase, sum_real, sum_imag;
  lapack_complex_double pre_phase_factor;

  svecs_adrs = p2s_map[i_patom] * multi_dims[1];
  sum_real = 0;
  sum_imag = 0;
  /* multiplicity[...][0] = number of images, [1] = offset into svecs */
  for (i = 0; i < multiplicity[svecs_adrs][0]; i++) {
    pre_phase = 0;
    for (j = 0; j < 3; j++) {
      pre_phase += svecs[multiplicity[svecs_adrs][1] + i][j] *
                   (q_vecs[0][j] + q_vecs[1][j] + q_vecs[2][j]);
    }
    pre_phase *= M_2PI;
    sum_real += cos(pre_phase);
    sum_imag += sin(pre_phase);
  }
  /* average over the equivalent images */
  sum_real /= multiplicity[svecs_adrs][0];
  sum_imag /= multiplicity[svecs_adrs][0];
  pre_phase_factor = lapack_make_complex_double(sum_real, sum_imag);
  return pre_phase_factor;
}

/* Phase factor exp(i q[qi] . r) averaged over the multi[0] equivalent
 * shortest vectors starting at svecs[multi[1]]. */
static lapack_complex_double get_phase_factor(const double q[3][3],
                                              const long qi,
                                              const double (*svecs)[3],
                                              const long multi[2])
{
  long i, j;
  double sum_real, sum_imag, phase;

  sum_real = 0;
  sum_imag = 0;
  for (i = 0; i < multi[0]; i++) {
    phase = 0;
    for (j = 0; j < 3; j++) {
      phase += q[qi][j] * svecs[multi[1] + i][j];
    }
    phase *= M_2PI;
    sum_real += cos(phase);
    sum_imag += sin(phase);
  }
  sum_real /= multi[0];
  sum_imag /= multi[0];
  return lapack_make_complex_double(sum_real, sum_imag);
}
/* ==================== soxr.c ==================== */
/* SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net
 * Licence for this file: LGPL v2.1 See LICENCE for details. */

#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "soxr.h"
#include "data-io.h"
#include "internal.h"

char const * soxr_version(void)
{
  return "libsoxr-" SOXR_THIS_VERSION_STR;
}

typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];

/* Virtual dispatch through the per-object control block: each macro casts
 * slot N of p->control_block to the appropriate function-pointer type and
 * calls it.  The concrete tables (_soxr_rate32_cb etc.) are selected in
 * soxr_create.  Note: each macro implicitly uses a variable `p` in the
 * calling scope. */
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])

typedef void * resampler_t;        /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */

typedef void (* deinterleave_t)(sample_t * * dest,
    soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);

typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
    sample_t const * const * src, size_t, unsigned, unsigned long *);

struct soxr {
  unsigned num_channels;
  double io_ratio;            /* input rate / output rate */
  soxr_error_t error;
  soxr_quality_spec_t q_spec;
  soxr_io_spec_t io_spec;
  soxr_runtime_spec_t runtime_spec;

  void * input_fn_state;
  soxr_input_fn_t input_fn;
  size_t max_ilen;

  resampler_shared_t shared;  /* state shared between channels */
  resampler_t * resamplers;   /* one per channel */
  control_block_t control_block;
  deinterleave_t deinterleave;
  interleave_t interleave;

  void * * channel_ptrs;
  size_t clips;
  unsigned long seed;         /* dither PRNG seed */
  int flushing;
};

/* TODO: these should not be here. */
#define TO_3dB(a) ((1.6e-6*a-7.5e-4)*a+.646)
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */

/* Build a quality spec from a recipe (low nibble = quality level 0..13)
 * and flags; sets spec.e on an invalid recipe. */
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
  soxr_quality_spec_t spec, * p = &spec;
  unsigned quality = recipe & 0xf;
  double rej;
  memset(p, 0, sizeof(*p));
  if (quality > 13) {
    p->e = "invalid quality type";
    return spec;
  }
  if (quality == 13)
    quality = 6;
  else if (quality > 10)
    quality = 0;
  /* octal "\62\31\144" = {50, 25, 100}, indexed by bits 4-5 of recipe;
   * presumably linear/intermediate/minimum phase responses — confirm
   * against soxr.h's SOXR_*_PHASE constants. */
  p->phase_response = "\62\31\144"[(recipe & 0x30) >> 4];
  p->stopband_begin = 1;
  p->precision = !quality? 0: quality < 3? 16 : quality < 8? 4 + quality * 4 : 55 - quality * 4;
  rej = p->precision * linear_to_dB(2.);
  p->flags = flags;
  if (quality < 8) {
    p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / TO_3dB(rej);
    if (quality <= 2)
      p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
  }
  else {
    static float const bw[] = {.931f, .832f, .663f};
    p->passband_end = bw[quality - 8];
    if (quality - 8 == 2)
      p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
  }
  if (recipe & SOXR_STEEP_FILTER)
    p->passband_end = 1 - .01 / TO_3dB(rej);
  return spec;
}

char const * soxr_engine(soxr_t p)
{
  return resampler_id();
}

size_t * soxr_num_clips(soxr_t p)
{
  return &p->clips;
}

soxr_error_t soxr_error(soxr_t p)
{
  return p->error;
}

/* Default runtime spec: DFT size bounds and coefficient cache size. */
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
  soxr_runtime_spec_t spec, * p = &spec;
  memset(p, 0, sizeof(*p));
  p->log2_min_dft_size = 10;
  p->log2_large_dft_size = 17;
  p->coef_size_kbytes = 400;
  p->num_threads = num_threads;
  return spec;
}

/* Build an I/O spec; sets spec.e if either datatype is out of range. */
soxr_io_spec_t soxr_io_spec(
  soxr_datatype_t itype,
  soxr_datatype_t otype)
{
  soxr_io_spec_t spec, * p = &spec;
  memset(p, 0, sizeof(*p));
  if ((itype | otype) >= SOXR_SPLIT * 2)
    p->e = "invalid io datatype(s)";
  else {
    p->itype = itype;
    p->otype = otype;
    p->scale = 1;
  }
  return spec;
}

#if HAVE_SIMD
/* Detect SSE support: always true on x86-64; on 32-bit x86, CPUID leaf 1,
 * EDX bits 25-26 (SSE/SSE2) => mask 0x06000000. */
static bool cpu_has_simd(void)
{
#if defined __x86_64__ || defined _M_X64
  return true;
#elif defined __GNUC__ && defined i386
  uint32_t eax, ebx, ecx, edx;
  /* EBX is preserved manually because it may be the PIC register. */
  __asm__ __volatile__ (
      "pushl %%ebx \n\t"
      "cpuid \n\t"
      "movl %%ebx, %1\n\t"
      "popl %%ebx \n\t"
      : "=a"(eax), "=r"(ebx), "=c"(ecx), "=d"(edx)
      : "a"(1)
      : "cc" );
  return !!(edx & 0x06000000);
#elif defined _MSC_VER && defined _M_IX86
  uint32_t d;
  __asm {
    xor eax, eax
    inc eax
    push ebx
    cpuid
    pop ebx
    mov d, edx
  }
  return !!(d & 0x06000000);
#endif
  return false;
}
#endif

extern control_block_t _soxr_rate32s_cb, _soxr_rate32_cb, _soxr_rate64_cb, _soxr_vr32_cb;

/* Create a resampler object; selects single/double precision engine and
 * (if enabled) the SIMD variant, then initialises per-channel state via
 * soxr_set_io_ratio.  On failure returns NULL and stores the error in
 * *error0 (if given). */
soxr_t soxr_create(
  double input_rate, double output_rate,
  unsigned num_channels,
  soxr_error_t * error0,
  soxr_io_spec_t const * io_spec,
  soxr_quality_spec_t const * q_spec,
  soxr_runtime_spec_t const * runtime_spec)
{
  /* -1 flags "rate unknown for now"; 0 means both rates unset. */
  double io_ratio = output_rate? input_rate? input_rate / output_rate : -1 : input_rate? -1 : 0;
  static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
  soxr_t p = 0;
  soxr_error_t error = 0;

  if (q_spec && q_spec->e)
    error = q_spec->e;
  else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
    error = "invalid io datatype(s)";

  if (!error && !(p = calloc(sizeof(*p), 1)))
    error = "malloc failed";

  if (p) {
    p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);

    if (q_spec) { /* Backwards compatibility with original API: */
      if (p->q_spec.passband_end > 2)
        p->q_spec.passband_end /= 100;
      if (p->q_spec.stopband_begin > 2)
        p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
    }

    p->io_ratio = io_ratio;
    p->num_channels = num_channels;
    if (io_spec)
      p->io_spec = *io_spec;
    else
      p->io_spec.scale = 1;

    p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);

    /* Rescale between integer and float full-scale ranges. */
    p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
                        datatype_full_scale[p->io_spec.itype & 3];
    p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p;

#if HAVE_SINGLE_PRECISION
    if (!HAVE_DOUBLE_PRECISION || (p->q_spec.precision <= 20 &&
          !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
        || (p->q_spec.flags & SOXR_VR)) {
      p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
      p->interleave = (interleave_t)_soxr_interleave_f;
      memcpy(&p->control_block,
          (p->q_spec.flags & SOXR_VR)? &_soxr_vr32_cb :
#if HAVE_SIMD
          cpu_has_simd()? &_soxr_rate32s_cb :
#endif
          &_soxr_rate32_cb, sizeof(p->control_block));
    }
#if HAVE_DOUBLE_PRECISION
    else
#endif
#endif
#if HAVE_DOUBLE_PRECISION
    {
      p->deinterleave = (deinterleave_t)_soxr_deinterleave;
      p->interleave = (interleave_t)_soxr_interleave;
      memcpy(&p->control_block, &_soxr_rate64_cb, sizeof(p->control_block));
    }
#endif
    if (p->num_channels && io_ratio)
      error = soxr_set_io_ratio(p, io_ratio, 0);
  }
  if (error)
    soxr_delete(p), p = 0;
  if (error0)
    *error0 = error;
  return p;
}

/* Register a pull-mode input callback; max_ilen of 0 means unlimited. */
soxr_error_t soxr_set_input_fn(soxr_t p,
    soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
  p->input_fn_state = input_fn_state;
  p->input_fn = input_fn;
  p->max_ilen = max_ilen? max_ilen : (size_t)-1;
  return 0;
}

/* Release everything owned by p (but not p itself); zeroes *p. */
static void soxr_delete0(soxr_t p)
{
  unsigned i;

  if (p->resamplers) for (i = 0; i < p->num_channels; ++i) {
    if (p->resamplers[i])
      resampler_close(p->resamplers[i]);
    free(p->resamplers[i]);
  }
  free(p->resamplers);
  free(p->channel_ptrs);
  free(p->shared);

  memset(p, 0, sizeof(*p));
}

double soxr_delay(soxr_t p)
{
  return (p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0;
}

/* Tear down p's internals and record + return the error. */
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
  soxr_delete0(p);
  return p->error = error;
}

/* Allocate shared + per-channel resampler state and create each channel's
 * resampler.  Any failure unwinds via fatal_error. */
static soxr_error_t initialise(soxr_t p)
{
  unsigned i;
  size_t shared_size, channel_size;

  resampler_sizes(&shared_size, &channel_size);
  p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels);
  p->shared = calloc(shared_size, 1);
  p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels);
  if (!p->shared || !p->channel_ptrs || !p->resamplers)
    return fatal_error(p, "malloc failed");

  for (i = 0; i < p->num_channels; ++i) {
    soxr_error_t error;
    if (!(p->resamplers[i] = calloc(channel_size, 1)))
      return fatal_error(p, "malloc failed");
    error = resampler_create(
        p->resamplers[i],
        p->shared,
        p->io_ratio,
        &p->q_spec,
        &p->runtime_spec,
        p->io_spec.scale);
    if (error)
      return fatal_error(p, error);
  }
  return 0;
}

/* Channel count may only be changed before the resamplers exist. */
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
  if (!p)                            return "invalid soxr_t pointer";
  if (num_channels == p->num_channels) return p->error;
  if (!num_channels)                 return "invalid # of channels";
  if (p->resamplers)                 return "# of channels can't be changed";
  p->num_channels = num_channels;
  return soxr_set_io_ratio(p, p->io_ratio, 0);
}

/* Set (or slew to) a new I/O ratio.  First call triggers lazy
 * initialisation; later calls require engine support for a variable
 * ratio (control-block slot 8). */
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
  unsigned i;
  soxr_error_t error;
  if (!p)                 return "invalid soxr_t pointer";
  if ((error = p->error)) return error;
  if (!p->num_channels)   return "must set # channels before O/I ratio";
  if (io_ratio <= 0)      return "I/O ratio out-of-range";
  if (!p->channel_ptrs) {
    p->io_ratio = io_ratio;
    return initialise(p);
  }
  if (p->control_block[8]) {
    for (i = 0; !error && i < p->num_channels; ++i)
      resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
    return error;
  }
  return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
    "Varying O/I ratio is not supported with this quality level";
}

void soxr_delete(soxr_t p)
{
  if (p)
    soxr_delete0(p), free(p);
}

/* Reset internal state while preserving configuration, by destroying and
 * re-recording the specs (re-initialisation is then lazy). */
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
  if (p) {
    struct soxr tmp = *p;
    soxr_delete0(p);
    memset(p, 0, sizeof(*p));
    p->input_fn = tmp.input_fn;
    p->runtime_spec = tmp.runtime_spec;
    p->q_spec = tmp.q_spec;
    p->io_spec = tmp.io_spec;
    p->num_channels = tmp.num_channels;
    p->input_fn_state = tmp.input_fn_state;
    memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
    p->deinterleave = tmp.deinterleave;
    p->interleave = tmp.interleave;
    return 0;
  }
  return "invalid soxr_t pointer";
}

/* Feed len samples of one already-split channel into its resampler. */
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
  sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
  (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}

/* Feed len frames of input; len == 0 switches the object into flushing
 * mode.  Handles both split and interleaved input layouts. */
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
  bool separated = !!(p->io_spec.itype & SOXR_SPLIT);
  unsigned i;
  if (!p || p->error)
    return 0;
  if (!in && len) {
    p->error = "null input buffer pointer";
    return 0;
  }
  if (!len) {
    p->flushing = true;
    return 0;
  }
  if (separated)
    for (i = 0; i < p->num_channels; ++i)
      soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
  else {
    for (i = 0; i < p->num_channels; ++i)
      p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
    (*p->deinterleave)(
        (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len,
        p->num_channels);
  }
  return len;
}

/* Run one channel's resampler and emit up to len output frames; for
 * interleaved output the per-channel pointer is stashed for the caller
 * to interleave later. */
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
  sample_t const * src;
  if (p->flushing)
    resampler_flush(p->resamplers[i]);
  resampler_process(p->resamplers[i], len);
  src = resampler_output(p->resamplers[i], NULL, &len);
  if (separated)
    p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
      len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  else
    p->channel_ptrs[i] = (void /* const */ *)src;
  return len;
}

/* Produce up to len output frames across all channels, optionally using
 * an OpenMP thread per channel; interleaves at the end if needed. */
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
  unsigned u;
  size_t done = 0;
  bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
  int i;
  if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
  for (i = 0; i < (int)p->num_channels; ++i) {
    size_t done1;
    done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
    if (!i)
      done = done1;
  } else
#endif
  for (u = 0; u < p->num_channels; ++u)
    done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);

  if (!separated)
    p->clips += (p->interleave)(p->io_spec.otype, &out,
      (sample_t const * const *)p->channel_ptrs, done, p->num_channels,
      (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  return done;
}

size_t soxr_output(soxr_t p, void * out, size_t len0)
{
  size_t odone, odone0 = 0, olen = len0, osize, idone;
  size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio));
  void const * in = out; /* Set to !=0, so that caller may leave unset.
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], 
olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
GB_unop__identity_fc32_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_uint8)
// op(A') function:  GB (_unop_tran__identity_fc32_uint8)

// C type:   GxB_FC32_t
// A type:   uint8_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: uint8_t widens to float and becomes the real part of a complex
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    /* aij = Ax [pA] */                                 \
    uint8_t aij = Ax [pA] ;                             \
    /* Cx [pC] = op (cast (aij)) */                     \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;      \
    Cx [pC] = z ;                                       \
}

// true if operator is the identity op with no typecasting
// (0 here: uint8_t -> GxB_FC32_t always requires a typecast, so the
// GB_memcpy fast path below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_uint8)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense / sparse / hypersparse: apply to all anz entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop bodies are supplied by the shared transpose template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
graph_decomposition_ref.h
#ifndef __GRAPH_DECOMPOSITION_REF_H__ #define __GRAPH_DECOMPOSITION_REF_H__ #include "graph.h" #include <mutex> #include <random> #include <omp.h> static float* genExp_ref(int n, float rate, float* maxVal, int* maxId) { std::default_random_engine generator; // note this will always generate the same values - which we want - for grading std::exponential_distribution<double> distribution(rate); float maxdu = -1.f; int id = -1; std::mutex mtx; float* vals = (float*) malloc(sizeof(float) * n); for (int i = 0; i < n; i++) { float val = distribution(generator); if (val > maxdu) { mtx.lock(); if (val > maxdu) { maxdu = val; id = i; } mtx.unlock(); } vals[i] = val; } *maxVal = maxdu; *maxId = id; return vals; } /** * Given an array of floats, casts them all into **/ static int* chopToInt_ref(float* fdus, int n) { int* dus = (int*)malloc(sizeof(int) * n); #pragma omp parallel for schedule(dynamic, 512) for (int i = 0; i < n; i++) { dus[i] = (int)fdus[i]; } return dus; } static int* getDus_ref(int n, float rate, int* maxVal, int* maxId) { float fmaxVal; float* expVals = genExp_ref(n, rate, &fmaxVal, maxId); int* dus = chopToInt_ref(expVals, n); free(expVals); *maxVal = (int)fmaxVal; return dus; } void decompose_ref(graph *g, int *decomp, int* dus, int maxDu, int maxDuId); #endif
PairwiseSumByTask.c
#include <omp.h> double PairwiseSumBySubtask(double* restrict var, long nstart, long nend); double PairwiseSumByTask(double* restrict var, long ncells) { double sum; #pragma omp parallel { #pragma omp master { sum = PairwiseSumBySubtask(var, 0, ncells); } } return(sum); } double PairwiseSumBySubtask(double* restrict var, long nstart, long nend) { long nsize = nend - nstart; long nmid = nsize/2; double x,y; if (nsize == 1){ return(var[nstart]); } #pragma omp task shared(x) mergeable final(nsize > 10) x = PairwiseSumBySubtask(var, nstart, nstart + nmid); #pragma omp task shared(y) mergeable final(nsize > 10) y = PairwiseSumBySubtask(var, nend - nmid, nend); #pragma omp taskwait return(x+y); }
cli.h
#pragma once

#include <gms/third_party/gapbs/command_line.h>
#include <gms/third_party/clipp.h>
#include "gms/common/format.h"
#include "parameter.h"
#include "args.h"
#include "compat.h"

namespace GMS::CLI {

using clipp::option;
using clipp::parameter;
using clipp::value;

// Command-line parser for GMS benchmarks, built on clipp.  Supports common
// flags (-v/-t/-n), benchmark-specific "-p name=value" parameters registered
// via add_param(), and two input modes: read a graph file (-f) or generate
// one (-g uniform|kronecker).
class Parser {
private:
    std::vector<ParamSpec> param_specs;   // registered benchmark parameters
    clipp::group custom_params;           // clipp options built from param_specs
    bool allow_directed_ = false;         // if false, inputs are symmetrized

public:
    /**
     * @brief Allow directed graphs as input.
     *
     * By default GMS only allows undirected graphs as inputs and symmetrizes any input graphs by default,
     * with this method this functionality can be changed.
     *
     * @param allow can also be set to false with this method again
     */
    void allow_directed(bool allow = true) {
        allow_directed_ = allow;
    }

    /**
     * Define a benchmark specific parameter.
     *
     * @param name Main identifier of the parameter.
     * @param alias Alternative identifier of the parameter.
     * @param defaultValue Default value (if none is provided, the parameter is mandatory).
     * @param help Documentation to be displayed for this parameter.
     * @return an instance of Param which can be used to retrieve the value, after one of the parse
     *         methods has been invoked on this class.
     *         However, for situations where this could be inconvenient, it's also possible to access
     *         the value with the Args.param method.
     */
    Param add_param(
        const std::string &name,
        const std::optional<std::string> &alias,
        const std::optional<std::string> &defaultValue,
        const std::string &help)
    {
        ParamSpec param_spec(name, alias, defaultValue, help);
        param_specs.push_back(param_spec);

        // Parameters without a default are mandatory; say so in the help text.
        std::string help_string;
        if (param_spec.default_value.has_value()) {
            help_string = param_spec.help + " (default: " + quote_empty_string(param_spec.default_value.value()) + ")";
        } else {
            help_string = param_spec.help + " (required)";
        }

        parameter opt = param_spec.alias.has_value() ?
            option(param_spec.name, param_spec.alias.value()) :
            option(param_spec.name);
        opt.doc(help_string);
        opt.required(!param_spec.default_value.has_value());
        // Option writes through the spec's shared value pointer on parse.
        parameter val = value("", *param_spec.value_ptr);
        custom_params.push_back(opt & val);

        return Param(param_specs.back().value_ptr);
    }

    // Parse argv into an Args object.  On parse failure prints a man page and
    // returns with args.error == 100; if neither -f nor -g was given,
    // args.error == 101.  Also applies the -t thread count to OpenMP.
    Args parse(int argc, char **argv) const {
        Args args(param_specs);
        std::string file_name;
        std::string gen_name;
        int64_t gen_scale;
        int64_t gen_avgdeg = 16;   // default average degree for generators

        auto cli = (
            option("-v", "--verify").set(args.verify).doc("perform a basic verification of the computation"),
            option("-t", "--threads").doc("specify the number of threads used")
                & value("threads", args.threads),
            option("-n", "--num-trials").doc("number of iterations for the benchmark")
                & value("trials", args.num_trials)
        );
        if (custom_params.size() > 0) {
            cli.push_back(
                option("-p", "--param").doc("set kernel specific parameters")
                    & with_suffix("=", custom_params)
            );
        }

        auto cli_read_file = (
            option("-f", "--file").required(true).doc("read graph from the specified file")
                & value("file_name", file_name)
        );
        if (allow_directed_) {
            // Only expose the -u flag when directed inputs are permitted.
            cli_read_file.push_back(
                option("-u", "--undirected", "--no-symmetrize")
                    .set(args.symmetrize, false)
                    .doc("don't symmetrize the input graph before running the benchmark")
            );
        } else {
            // Symmetrize by default.
            args.symmetrize = true;
        }

        auto cli_generate = (
            option("-g", "--gen").required(true).doc("generate graph with the specified generator")
                & (
                    option("uniform").required(true).set(gen_name, std::string("uniform"))
                    | option("kronecker").required(true).set(gen_name, std::string("kronecker"))
                )
                & value("scale", gen_scale).doc("size of the generated graph = 2^scale"),
            option("--deg") & value("average_degree", gen_avgdeg)
        );

        // Exactly one of the two input modes.
        cli.push_back(cli_read_file | cli_generate);

        if (!clipp::parse(argc, argv, cli)) {
            std::cout << make_man_page(cli, argv[0]);
            args.error = 100;
            return args;
        }

        if (!file_name.empty()) {
            args.graph_spec.name = file_name;
            args.graph_spec.is_generator = false;
        } else if (!gen_name.empty()) {
            args.graph_spec.name = gen_name;
            args.graph_spec.is_generator = true;
            args.graph_spec.gen_scale = gen_scale;
            args.graph_spec.gen_avgdeg = gen_avgdeg;
        } else {
            args.error = 101;
            return args;
        }

        // note: copied from gapbs/command_line.h
#ifdef _OPENMP
        if (args.threads != 0) {
            omp_set_dynamic(0);
            omp_set_num_threads(args.threads);
        }
#pragma omp parallel
        {
#pragma omp master
            std::cout << "Using " << omp_get_num_threads() << " OMP threads" << std::endl;
        }
#else // _OPENMP
        std::cout << "OMP is disabled. Using 1 thread." << std::endl;
#endif // _OPENMP

        return args;
    }

    // Parse argv, then load (or generate) the graph, exiting the process on
    // any error.  May relabel the graph by degree for performance.
    auto parse_and_load(int argc, char **argv) const {
        Args args = parse(argc, argv);
        if (args.error != 0) {
            std::exit(args.error);
        }
        args.print();
        auto g = args.load_graph();
        if (!allow_directed_ && g.directed()) {
            // If this happens, it's probably a bug.
            // TODO but check how it interacts with cached/preloaded graphs
            std::cerr << "undirected graph not allowed, but loaded an undirected graph" << std::endl;
            std::exit(100);
        }
        // TODO this should be improved in a further commit
        bool allow_relabel = true;
        if (allow_relabel && WorthRelabelling(g)) {
            g = Builder::RelabelByDegree(g);
            std::cout << "---------\n"
                << "NOTE: The input graph got relabeled.\n"
                << "---------" << std::endl;
        }
        return std::make_tuple<Args, CSRGraph>(std::move(args), std::move(g));
    }
};

}
GB_unaryop__minv_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int64_uint8
// op(A') function:  GB_tran__minv_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 64)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: saturating signed integer multiplicative inverse (1/x)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (x, aij) ;                       \
    GB_OP (GB_CX (pC), x) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int64_uint8
(
    int64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise apply over all anz entries, statically scheduled
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop bodies supplied by the shared transpose template (phase 2)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hilos.c
//Ejercicio EDAII. Profesor Francisco Javier Rodriguez /*Instrucción de compilación gcc -Wall -std=c99 hilos.c -fopenmp */ #include <stdio.h> #include <stdlib.h> // para system() y rand() #include <time.h> // para time() #include <omp.h> #define TAM 10 int main() { int a[TAM]; int b[TAM]; srand( time( NULL ) ); #pragma omp parallel num_threads( 4 ) { //#pragma omp single { printf( "Thread %d\n", omp_get_thread_num() ); for( size_t i = 0; i < TAM; ++i ){ a[ i ] = rand() % 100; printf( "a[%d] = %d\n", i, a[i] ); } } // --- Implicit barrier #pragma omp for for( size_t i = 0; i < TAM; ++i ){ b[ i ] = a[ i ] * 2; } // --- Implicit barrier // #pragma omp single { for( size_t i = 0; i < TAM; ++i ){ printf( "Thread(%d): b[%d] = %d\n", omp_get_thread_num(), i, b[i] ); } } // --- Implicit barrier } // --- Implicit barrier }
trmm_x_csr_n_hi_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); ALPHA_INT* acc_nnz = alpha_malloc(sizeof(ALPHA_INT) * mat->rows); memset(acc_nnz, '\0', mat->rows * sizeof(ALPHA_INT)); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT ar = 0; ar < mat->rows; ar++) { for (ALPHA_INT ai = mat->rows_start[ar]; ai < mat->rows_end[ar]; ai++) { if (mat->col_indx[ai] >= ar) { acc_nnz[ar] += 1; } } } for (ALPHA_INT i = 1; i < mat->rows; i++) { acc_nnz[i] += acc_nnz[i - 1]; } ALPHA_INT *partition = alpha_malloc((num_threads + 1) * sizeof(ALPHA_INT)); balanced_partition_row_by_nnz(acc_nnz,mat->rows, num_threads, partition); #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT local_m_s = partition[tid]; ALPHA_INT local_m_e = partition[tid + 1]; for (ALPHA_INT r = local_m_s; r < local_m_e; ++r) { ALPHA_Number *Y = &y[index2(r, 0, ldy)]; for (ALPHA_INT c = 0; c <columns; c++) alpha_mule(Y[c], beta); for (ALPHA_INT ai = mat->rows_start[r]; ai < mat->rows_end[r]; ai++) { ALPHA_INT ac = mat->col_indx[ai]; if (ac >= r) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); const ALPHA_Number *X = &x[index2(ac, 0, ldx)]; for (ALPHA_INT c = 0; c <columns; ++c) alpha_madde(Y[c], val, X[c]); } } } } alpha_free(partition); alpha_free(acc_nnz); return ALPHA_SPARSE_STATUS_SUCCESS; }
factorize_gmp_2step_primtest.c
/*******************************************************************************************************************
 * Compiling: mpicc fattor.c -lgmp -fopenmp -o fattor
 * Running: mpirun -n PROCNUM --bind-to none fattor NUMBER
 * Note: PROCNUM is the number of processes that will be ran, and it must be >=2, NUMBER is the number to factorize
 *******************************************************************************************************************/
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <gmp.h>

struct elem { // Very basic and non-reusable stack
    mpz_t val;
    struct elem* next;
};

/* Push a copy of val onto the stack. */
void add(struct elem** head, mpz_t val) {
    struct elem* app = malloc(sizeof(struct elem));
    mpz_init(app->val);
    mpz_set(app->val, val); // app->val = val;
    app->next = *head;
    *head = app;
}

/* Pop the top value into toret; yields 0 when the stack is empty.
 * NOTE(review): mpz_init on an already-initialized toret (as happens in the
 * slave's send loop) leaks the previous allocation per the GMP manual, and
 * the popped node's mpz is freed without mpz_clear — both are small leaks in
 * this short-lived program; confirm before reuse. */
void pick(struct elem** head, mpz_t toret) {
    mpz_init(toret);
    struct elem* app;
    if(*head == NULL)
        mpz_set_ui(toret, 0); // toret = 0;
    else {
        mpz_set(toret, (*head)->val); // toret = (*head)->val;
        app = *head;
        *head = (*head)->next;
        // mpz_finalize(app->val);
        free(app);
    }
}

/* Rank 0: receive factors from each slave and print them.  A message whose
 * value is 0 is that slave's termination marker; loop until all comm_size-1
 * slaves have terminated.  Note: the MPI_Recv source is `i`, so slaves are
 * drained one at a time, in rank order. */
void master_procedure(int comm_size) {
    int i = 1;
    long long rec;
    int shit_happened;
    unsigned char buffer[50];
    MPI_Status stat;
    int count;
    mpz_t received_number;
    mpz_init(received_number);
    char stringa[200];
    while(i < comm_size) {
        shit_happened = MPI_Recv(buffer, 50, MPI_UNSIGNED_CHAR, i, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
        MPI_Get_count(&stat, MPI_UNSIGNED_CHAR, &count);
        /* Rebuild the GMP integer from the raw big-endian byte stream. */
        mpz_import(received_number, count, 1, 1, 1, 0, buffer);
        if(shit_happened) {
            fprintf(stderr, "Recv failed");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        if(mpz_cmp_ui(received_number, 0) == 0) // if(received_number == 0)
            ++i;    /* zero = this slave is done; move on to the next one */
        else {
            mpz_get_str(stringa, 10, received_number);
            printf("Factor: %s\n", stringa);
        }
    }
}

/* Ranks >= 1: trial-divide a slice of [1, sqrt(the_number)], further split
 * across OpenMP threads, collecting probable-prime factors on a shared stack
 * and then sending them (followed by a 0 terminator) to rank 0. */
void slave_procedure(int my_rank, int comm_size, mpz_t the_number) {
    int shit_happened;
    struct elem* head = NULL;
    unsigned char* buffer;
    mpz_t temp;
    mpz_t from;
    mpz_t to;
    mpz_t to_send;
    mpz_t div2;
    mpz_t number2;
    mpz_init(temp);
    mpz_init(from);
    mpz_init(to);
    mpz_init(to_send);
    mpz_init(div2);
    mpz_init(number2);
    mpz_set_ui(number2, 2);
    /* This slave's slice: [temp*(rank-1), temp*rank] with temp = sqrt(N)/(P-1). */
    mpz_root(temp, the_number, 2);                  // temp = sqrt(the_number);
    mpz_div_ui(temp, temp, comm_size - 1);          // temp = temp / (comm_size - 1);
    mpz_mul_ui(from, temp, my_rank - 1);            // from = temp * (my_rank - 1);
    mpz_mul_ui(to, temp, my_rank);                  // to = temp * my_rank;
    if(mpz_cmp_ui(from, 0) == 0) { // if(from == 0)
        /* First slice: handle the only even prime (2) specially, since the
         * main loop below tests odd candidates only. */
        if(mpz_divisible_ui_p(the_number, 2)) {
            mpz_divexact_ui(div2, the_number, 2);   // Only works if the_number % 2 == 0;
            add(&head, number2);
            add(&head, div2);
        }
        mpz_set_ui(from, 1);                        // from = 1;
    }
    if(mpz_divisible_ui_p(from, 2))                 // if(from % 2 == 0)
        mpz_add_ui(from, from, 1);                  // ++from; make the start odd
#pragma omp parallel shared(from, to)
    {
        int my_thread = omp_get_thread_num();
        int threads = omp_get_num_threads();
        mpz_t from_thread;
        mpz_t to_thread;
        mpz_t divided;
        mpz_init(from_thread);
        mpz_init(to_thread);
        mpz_init(divided);
        /* Sub-slice [from + span*t/threads, from + span*(t+1)/threads]. */
        mpz_sub(to_thread, to, from);               // to_thread = to - from;
        mpz_set(from_thread, to_thread);            // from_thread = to_thread;
        mpz_div_ui(to_thread, to_thread, threads);  // to_thread = to_thread / threads;
        mpz_mul_ui(to_thread, to_thread, my_thread + 1); // to_thread = to_thread * (my_thread + 1);
        mpz_div_ui(from_thread, from_thread, threads);   // from_thread = from_thread / threads;
        mpz_mul_ui(from_thread, from_thread, my_thread); // from_thread = from_thread * my_thread;
        mpz_add(from_thread, from_thread, from);    // from_thread = from_thread + from;
        mpz_add(to_thread, to_thread, from);        // to_thread = to_thread + from;
        if(mpz_divisible_ui_p(from_thread, 2))      // if(from_thread % 2 == 0)
            mpz_add_ui(from_thread, from_thread, 1); // start on an odd candidate
        while(mpz_cmp(from_thread, to_thread) <= 0) {
            if(mpz_divisible_p(the_number, from_thread)) {
                mpz_divexact(divided, the_number, from_thread); // Only works if the_number % from_thread == 0;
                /* Keep only probable primes (25 Miller-Rabin rounds); the
                 * shared stack requires the critical sections (add() copies
                 * the value, so the thread-local mpz_t is safe to pass). */
                if(mpz_probab_prime_p(from_thread, 25)) {
#pragma omp critical
                    {
                        add(&head, from_thread);
                    }
                }
                if(mpz_probab_prime_p(divided, 25)) {
#pragma omp critical
                    {
                        add(&head, divided);
                    }
                }
            }
            mpz_add_ui(from_thread, from_thread, 2); // from_thread += 2; odd candidates only
        }
    }
    /* Drain the stack to rank 0; the final pick() on an empty stack yields 0,
     * which is sent as the termination marker (a single zero byte, since
     * mpz_export writes nothing for the value 0 and *buffer was pre-zeroed). */
    do {
        pick(&head, to_send);
        int how_many_bytes = (mpz_sizeinbase(to_send, 2) + 7) / 8; // How many bytes is to_send
        buffer = malloc(how_many_bytes);
        *buffer = 0;
        mpz_export(buffer, NULL, 1, 1, 1, 0, to_send); // Export the number to buffer
        shit_happened = MPI_Send(buffer, how_many_bytes, MPI_UNSIGNED_CHAR, 0, 0, MPI_COMM_WORLD);
        if(shit_happened) {
            fprintf(stderr, "Send failed");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        free(buffer);
    }while(mpz_cmp_ui(to_send, 0));
}

/* Entry point: rank 0 collects/prints factors, all other ranks search. */
int main(int argc, char** argv) {
    int my_rank, comm_size;
    mpz_t the_number;
    mpz_init(the_number);
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if(argc <= 1) {
        fprintf(stderr, "Missing number as argument");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    else
        mpz_set_str(the_number, argv[1], 10); // 10 is the base
    if(my_rank == 0)
        master_procedure(comm_size);
    else
        slave_procedure(my_rank, comm_size, the_number);
    MPI_Finalize();
    return 0;
}
Math.h
// // Created by Jarlene on 2017/7/21. // #ifndef MATRIX_MATH_H #define MATRIX_MATH_H #include <math.h> #include <assert.h> #include <functional> #include <vector> #include <random> #include <sys/time.h> #include "Logger.h" #include "Eigen.h" #ifdef USE_MP #include <omp.h> #endif #ifdef USE_MKL #ifndef BLAS #define BLAS #endif #include <mkl.h> #include <mkl_cblas.h> #include <mkl_vsl.h> #include <mkl_vsl_functions.h> #elif defined(USE_BLAS) #ifndef BLAS #define BLAS #endif #include <cblas.h> #endif namespace matrix { static inline bool isLess(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } static struct timeval tv; static std::mt19937 rnd_engine_; enum BlasTranspose { NoTrans, Trans, ConjTrans }; /// C := alpha*op(A)*op(B) + beta*C /// \tparam T the type of input data /// \param TransA /// \param TransB /// \param M /// \param N /// \param K /// \param alpha /// \param A /// \param B /// \param beta /// \param C template <class T> inline void CPUGemm(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const T alpha, const T *A, const T *B, const T beta, T *C); template <> inline void CPUGemm<float>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const float alpha, const float *A, const float *B, const float beta, float *C) { #ifdef BLAS int lda = (TransA == NoTrans) ? K : M; int ldb = (TransB == NoTrans) ? N : K; CBLAS_TRANSPOSE Atrans, Btrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; } switch (TransB) { case NoTrans: Btrans = CblasNoTrans; break; case Trans: Btrans = CblasTrans; break; case ConjTrans: Btrans = CblasConjTrans; break; } cblas_sgemm(CblasRowMajor, Atrans, Btrans, M, N, K, alpha, A, lda, B, ldb, beta, C, N); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? 
N : K; // B 的列 int aCol = (TransA == NoTrans) ? K : M; // A的列 auto aMatrix = create<float>(A, lda, aCol); auto bMatrix = create<float>(B, aCol, ldb); auto cMatrix = create<float>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<double>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const double alpha, const double *A, const double *B, const double beta, double *C) { #ifdef BLAS int lda = (TransA == NoTrans) ? K : M; int ldb = (TransB == NoTrans) ? N : K; CBLAS_TRANSPOSE Atrans, Btrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; } switch (TransB) { case NoTrans: Btrans = CblasNoTrans; break; case Trans: Btrans = CblasTrans; break; case ConjTrans: Btrans = CblasConjTrans; break; } cblas_dgemm(CblasRowMajor, Atrans, Btrans, M, N, K, alpha, A, lda, B, ldb, beta, C, N); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? K : N; // B 的列 int aCol = (TransA == NoTrans) ? K : M; // A的列 auto aMatrix = create<double>(A, lda, aCol); auto bMatrix = create<double>(B, aCol, ldb); auto cMatrix = create<double>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<int>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const int alpha, const int *A, const int *B, const int beta, int *C) { #ifdef USE_EIGEN int lda = (TransA == NoTrans) ? M : K; // A 的行 int ldb = (TransB == NoTrans) ? N : K; // B 的列 int aCol = (TransA == NoTrans) ? 
K : M; // A的列 auto aMatrix = create<int>(A, lda, aCol); auto bMatrix = create<int>(B, aCol, ldb); auto cMatrix = create<int>(C, lda, ldb); cMatrix = alpha * aMatrix * bMatrix + beta * cMatrix; #endif } template <> inline void CPUGemm<long>(const BlasTranspose TransA, const BlasTranspose TransB, const int M, const int N, const int K, const long alpha, const long *A, const long *B, const long beta, long *C) { } /// y := alpha*A*x + beta*y, or y := alpha*A^T*x + beta*y, /// \tparam T /// \param TransA /// \param M /// \param N /// \param alpha /// \param A /// \param x /// \param beta /// \param y template <class T> inline void CPUGemv(const BlasTranspose TransA, const int M, const int N, const T alpha, const T *A, const T *x, const T beta, T *y); template <> inline void CPUGemv<float>(const BlasTranspose TransA, const int M, const int N, const float alpha, const float *A, const float *x, const float beta, float *y) { #ifdef BLAS CBLAS_TRANSPOSE Atrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; default: break; } cblas_sgemv(CblasRowMajor, Atrans, M, N, alpha, A, N, x, 1, beta, y, 1); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans)? M : N; int cda = (TransA == NoTrans)? N : M; auto aMatrix = create<>(A, lda, cda); auto xVector = create<>(x, cda); auto yVector = create<>(y, lda); yVector = alpha * aMatrix * xVector + beta * yVector; #endif } template <> inline void CPUGemv<double>(const BlasTranspose TransA, const int M, const int N, const double alpha, const double *A, const double *x, const double beta, double *y) { #ifdef BLAS CBLAS_TRANSPOSE Atrans; switch (TransA) { case NoTrans: Atrans = CblasNoTrans; break; case Trans: Atrans = CblasTrans; break; case ConjTrans: Atrans = CblasConjTrans; break; default: break; } cblas_dgemv(CblasRowMajor, Atrans, M, N, alpha, A, N, x, 1, beta, y, 1); #elif defined(USE_EIGEN) int lda = (TransA == NoTrans)? 
M : N;
        int cda = (TransA == NoTrans)? N : M;
        auto aMatrix = create<double>(A, lda, cda);
        auto xVector = create<double>(x, cda);
        auto yVector = create<double>(y, lda);
        yVector = alpha * aMatrix * xVector + beta * yVector;
#endif
    }

    template <>
    inline void CPUGemv<int>(const BlasTranspose TransA, const int M, const int N,
                             const int alpha, const int *A, const int *x,
                             const int beta, int *y) {
#ifdef USE_EIGEN
        int lda = (TransA == NoTrans)? M : N;
        int cda = (TransA == NoTrans)? N : M;
        auto aMatrix = create<int>(A, lda, cda);
        auto xVector = create<int>(x, cda);
        auto yVector = create<int>(y, lda);
        yVector = alpha * aMatrix * xVector + beta * yVector;
#endif
    }

    // Unimplemented stub: long gemv is a silent no-op.
    template <>
    inline void CPUGemv<long>(const BlasTranspose TransA, const int M, const int N,
                              const long alpha, const long *A, const long *x,
                              const long beta, long *y) {
    }

    /// Y = alpha * X + Y (strided axpy).
    /// \tparam T element type
    /// \param N element count
    /// \param alpha scale applied to X
    /// \param X input vector
    /// \param incx stride through X
    /// \param Y output vector, accumulated in place
    /// \param incy stride through Y
    template <class T>
    inline void CPUAxpy(const int N, const T alpha, const T *X, int incx, T *Y, int incy);

    template <>
    inline void CPUAxpy<float>(const int N, const float alpha, const float *X, int incx, float *Y, int incy) {
#ifdef BLAS
        cblas_saxpy(N, alpha, X, incx, Y, incy);
#else
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): posx/posy carry across iterations, so this loop is not
        // safe to parallelise as written -- verify before enabling USE_MP.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            Y[posy] += alpha * X[posx];
            posx += incx;
            posy += incy;
        }
#endif
    }

    template <>
    inline void CPUAxpy<double>(const int N, const double alpha, const double *X, int incx, double *Y, int incy) {
#ifdef BLAS
        cblas_daxpy(N, alpha, X, incx, Y, incy);
#else
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): same loop-carried posx/posy issue as the float version.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            Y[posy] += alpha * X[posx];
            posx += incx;
            posy += incy;
        }
#endif
    }

    template <>
    inline void CPUAxpy<int>(const int N, const int alpha, const int *X, int incx, int *Y, int incy) {
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): same loop-carried posx/posy issue as the float version.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            Y[posy] += alpha * X[posx];
            posx += incx;
            posy += incy;
        }
    }

    template <>
    inline void CPUAxpy<long>(const int N, const long alpha, const long *X, int incx, long *Y, int incy) {
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): same loop-carried posx/posy issue as the float version.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            Y[posy] += alpha * X[posx];
            posx += incx;
            posy += incy;
        }
    }

    /**
     * Y = alpha * X + beta * Y (strided axpby).
     * NOTE(review): only the BLAS builds implement this -- without BLAS the
     * float/double specialisations silently do nothing, and int/long are
     * empty stubs.
     * @param T element type
     * @param N element count
     * @param alpha scale applied to X
     * @param X input vector
     * @param beta scale applied to the existing Y
     * @param Y output vector
     */
    template <class T>
    inline void CPUAxpby(const int N, const T alpha, const T *X, int incx, const T beta, T *Y, int incy);

    template <>
    inline void CPUAxpby<float>(const int N, const float alpha, const float *X, int incx, const float beta, float *Y, int incy) {
#ifdef BLAS
        cblas_saxpby(N, alpha, X, incx, beta, Y, incy);
#endif
    }

    template <>
    inline void CPUAxpby<double>(const int N, const double alpha, const double *X, int incx, const double beta, double *Y, int incy) {
#ifdef BLAS
        cblas_daxpby(N, alpha, X, incx, beta, Y, incy);
#endif
    }

    template <>
    inline void CPUAxpby<int>(const int N, const int alpha, const int *X, int incx, const int beta, int *Y, int incy) {
    }

    template <>
    inline void CPUAxpby<long>(const int N, const long alpha, const long *X, int incx, const long beta, long *Y, int incy) {
    }

    /// Y = X (strided copy).
    /// \tparam T element type
    /// \param N element count
    /// \param x source vector
    /// \param incx stride through x
    /// \param y destination vector
    /// \param incy stride through y
    template <class T>
    inline void CPUCopy(const int N, const T* x, int incx, T* y, int incy);

    template <>
    inline void CPUCopy<float>(const int N, const float* x, int incx, float* y, int incy) {
#ifdef BLAS
        cblas_scopy(N, x, incx, y, incy);
#else
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            y[posy] = x[posx];
            posy += incy;
            posx += incx;
        }
#endif
    }

    template <>
    inline void CPUCopy<double>(const int N, const double* x, int incx, double* y, int incy) {
#ifdef BLAS
        cblas_dcopy(N, x, incx, y, incy);
#else
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            y[posy] = x[posx];
            posy += incy;
            posx += incx;
        }
#endif
    }

    template <>
    inline void CPUCopy<int>(const int N, const int* x, int incx, int* y, int incy) {
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            y[posy] = x[posx];
            posy += incy;
            posx += incx;
        }
    }

    template <>
    inline void CPUCopy<long>(const int N, const long* x, int incx, long* y, int incy) {
        int posx = 0;
        int posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            y[posy] = x[posx];
            posy += incy;
            posx += incx;
        }
    }

    /// Element-wise exchange of two strided vectors.
    /// \tparam T element type
    /// \param N element count
    /// \param x first vector
    /// \param incx stride through x
    /// \param y second vector
    /// \param incy stride through y
    template <class T>
    inline void CPUSwap(const int N, T * x, int incx, T *y, int incy );

    template <>
    inline void CPUSwap<float>(const int N, float * x, int incx, float *y, int incy ) {
#ifdef BLAS
        cblas_sswap(N, x, incx, y, incy);
#else
        int posx = 0, posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            std::swap(x[posx], y[posy]);
            posx += incx;
            posy += incy;
        }
#endif
    }

    template <>
    inline void CPUSwap<double>(const int N, double * x, int incx, double *y, int incy ) {
#ifdef BLAS
        cblas_dswap(N, x, incx, y, incy);
#else
        int posx = 0, posy = 0;
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
        // NOTE(review): loop-carried posx/posy -- not parallel-safe as written.
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            std::swap(x[posx], y[posy]);
            posx += incx;
            posy += incy;
        }
#endif
    }

    /// Reverses x in place by swapping mirrored elements.
    template <class T>
    inline void CPUSwap(const int N, T * x) {
#ifdef USE_MP
        omp_set_num_threads(CPU_CORES);
#pragma omp parallel for
#endif
        for (int i = 0; i < N/2; ++i) {
            std::swap(x[i], x[N-1-i]);
        }
    }

    /// res = x' * y (dot product; non-BLAS builds accumulate into res).
    /// \tparam T element type
    /// \param N element count
    /// \param x first vector
    /// \param y second vector
    template <class T>
inline void CPUDot(const int N, const T* x, const T* y, T& res); template <> inline void CPUDot<float>(const int N, const float* x, const float* y, float& res) { #ifdef BLAS res = cblas_sdot(N, x, 1, y, 1); #else #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } #endif } template <> inline void CPUDot<double>(const int N, const double* x, const double* y, double& res) { #ifdef BLAS res = cblas_ddot(N, x, 1, y, 1); #else #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } #endif } template <> inline void CPUDot<int>(const int N, const int* x, const int* y, int& res) { #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } } template <> inline void CPUDot<long>(const int N, const long* x, const long* y, long& res) { #ifdef USE_MP omp_set_num_threads(CPU_CORES); #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { res += x[i] * y[i]; } } template <class T> inline void Value(const int N, T* out, T val) { if (val == T(0)) { memset(out, 0, sizeof(T) * N); return; } #ifdef USE_EIGEN Vec<T> vec = create<T>(out, N); vec.fill(val); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = val; } #endif } template <class T> inline void Scale(const int N, T* out, T val); template <> inline void Scale<float>(const int N, float* out, float val) { #ifdef BLAS cblas_sscal(N, val, out, 1); #elif define(USE_EIGEN) auto v = create<float>(out, N); v = v * val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<double>(const int N, double* out, double val) { #ifdef BLAS cblas_dscal(N, val, out, 1); #elif define(USE_EIGEN) auto v = create<double>(out, N); v = v*val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 
0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<int>(const int N, int* out, int val) { #ifdef USE_EIGEN auto v = create<int>(out, N); v *= val; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } #endif } template <> inline void Scale<long>(const int N, long* out, long val) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] *= val; } } template <class T> inline void Random(const int N, T *out, T mu, T sigma) { gettimeofday(&tv,NULL); std::normal_distribution<T> dist_normal(mu, sigma); rnd_engine_.seed((unsigned int) (tv.tv_sec * 1000 * 1000 + tv.tv_usec)); #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = dist_normal(rnd_engine_); } } template <class T> inline void Add(const int N, const int M, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N, M); auto bv = create<T>(b, N); auto yv = create<T>(y, N, M); yv = av.colwise() + bv; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { y[i * M +j] = a[i * M +j] + b[i]; } } #endif } template <class T> inline void Add(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = (av + bv); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] + b[i]; } #endif } template <class T> inline void Sub(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av - bv; #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] - b[i]; } #endif } template <class T> inline void Mul(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av.array() * bv.array(); #else #ifdef USE_MP 
#pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] * b[i]; } #endif } template <class T> inline void Div(const int N, const T *a, const T *b, T *y) { #ifdef USE_EIGEN auto av = create<T>(a, N); auto bv = create<T>(b, N); auto yv = create<T>(y, N); yv = av.array() / bv.array(); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = a[i] / b[i]; } #endif } template <class T> inline void Reciprocal(const int N, T *x) { #ifdef USE_EIGEN auto xv = create<T>(x, N); xv /= T(1.0); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { x[i] = T(1.0) / x[i]; } #endif } template <class T> inline void Negative(const int N, T *x) { #ifdef USE_EIGEN auto xv = create<T>(x, N); xv = (T(0) - xv); #else #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { x[i] = -x[i]; } #endif } /// tanh /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Tanh(const int N, const T *x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = (exp(x[i])-exp(-x[i]))/(exp(x[i]) + exp(-x[i])); } } /// tanh gradient /// \tparam T /// \param N /// \param x /// \param y /// \param z template <class T> inline void TanhGrad(const int N, const T *x, const T *y, T *z) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { z[i] = y[i] * (T(1) - x[i]*x[i]); } } /// sigmoid /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Sigmoid(const int N, const T*x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = T(1)/(T(1) + exp(T(-1) * x[i])); } } /// sigmoid gradient /// \tparam T /// \param N /// \param x /// \param y /// \param z template <class T> inline void SigmoidGrad(const int N, const T *x, const T *y, T *z) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { z[i] = y[i]*x[i]*((T)1-x[i]); } } /// relu /// \tparam T /// \param N /// 
\param x /// \param y template <class T> inline void Relu(const int N, const T *x, T *y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { y[i] = (x[i] > T(0) ? x[i] : T(0)); } } /// relu gradient /// \tparam T /// \param N /// \param dx /// \param x /// \param dy template <class T> inline void ReluGrad(const int N, const T *x, const T *dx, T* dy) { #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i < N; ++i) { dy[i] = (x[i] > (T)0 ? dx[i] : 0); } } /// softmax /// \tparam T /// \param N /// \param x /// \param y template <class T> inline void Softmax(const int N, const T* x, T* y) { const T max = *std::max_element(x, x + N); T sum = (T)0; #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i<N; ++i) { y[i] = std::exp(x[i] - max); sum += y[i]; } #ifdef USE_MP #pragma omp parallel for #endif for (int i=0; i<N; ++i) { y[i] /= sum; } } template <class T> inline void SoftmaxGrad(const int N, const int D, const T* x, const T* pre, T* y) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { T sum = T(0); for (int j = 0; j < D; ++j) { sum += x[i * D + j] * pre[i * D + j]; } for (int k = 0; k < D; ++k) { y[i * D + k] = x[i * D + k] * (pre[i * D + k] - sum); } } } /// cross-entropy /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M real data length /// \param in2 real value /// \param out template <class T> inline void CrossEntropy(const int N, const T *in1, const int M, const T *in2, T *out) { int class_num = N / M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { int label = (int)in2[i]; int index = i * class_num + label; out[0] += T(-1) * log(in1[index]); } out[0] /= M; } /// cross-entropy gradient /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M real data length /// \param in2 real value /// \param out template <class T> inline void CrossEntropyGrad(const int N, const T *in1, const int 
M, const T *in2, T *out) { int class_num = N / M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { int label = (int)in2[i]; int index = i * class_num + label; out[index] = T(-1.0) / in1[index]; } } /// rms loss /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M label data length /// \param in2 label value /// \param out template <class T> inline void RMSLoss(const int N, const T *in1, const int M, const T *in2, T *out) { if (N == M) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[0] += T(0.5) * (in1[i] - in2[i]) * (in1[i] - in2[i]); } } else { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { for (int j = 0; j < N / M; ++j) { int idx = static_cast<int>(in2[i]); if (j == idx) { out[0] += T(0.5) * (in1[i] - 1) * (in1[i] - 1); } else { out[0] += T(0.5) * in1[i] * in1[i]; } } } } out[0] /= M; } /// rms loss grad /// \tparam T /// \param N prediction data length /// \param in1 prediction value /// \param M label data length /// \param in2 label value /// \param out template <class T> inline void RMSLossGrad(const int N, const T *in1, const int M, const T *in2, T *out) { if (N == M) { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < N; ++i) { out[i] = (in1[i] - in2[i]); } } else { #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { for (int j = 0; j < N / M; ++j) { int idx = static_cast<int>(in2[i]); if (j == idx) { out[i * M + j] = (in1[i * M + j] - 1); } else { out[i * M + j] = in1[i * M + j]; } } } } } template <class T> inline void SoftmaxCrossEntropy(const int N, const T *data, const int M, const T *label, T *out) { int class_num = N/M; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < M; ++i) { T sum = T(0.0); for (int j = 0; j < class_num; ++j) { sum += exp(data[i * class_num +j]); } out[0] += log(sum) - data[static_cast<int>(label[i])]; } out[0] /= M; } template <class T> inline 
void SoftmaxCrossEntropyGrad(const int N, const T *data, const int M, const T *label, T *out) {
        // Gradient of fused softmax + cross-entropy: per sample,
        // out = softmax(data) with 1 subtracted at the labelled class.
        int class_num = N / M;
#ifdef USE_MP
#pragma omp parallel for
#endif
        for (int i = 0; i < M; ++i) {
            const T *d = data + i * class_num;
            T *o = out + i * class_num;
            Softmax<T>(class_num, d, o);
            o[static_cast<int>(label[i])] -= 1;
        }
    }

    /// Applies func(i) for every i in [0, N), parallelised under USE_MP.
    /// NOTE(review): the template parameter T is unused.
    template <class T>
    inline void Reduce(const int N, std::function<void(int)> func) {
#ifdef USE_MP
#pragma omp parallel for
#endif
        for (int i = 0; i < N; ++i) {
            func(i);
        }
    }

    /// Accumulates consecutive groups of N/M inputs: out[i] += sum of group i.
    template <class T>
    inline void SumCopy(const int N, const T *in, const int M, T *out) {
#ifdef USE_MP
#pragma omp parallel for
#endif
        for (int i = 0; i < M; ++i) {
            for (int j = 0; j < N / M; ++j) {
                out[i] += in[i * N / M + j];
            }
        }
    }

    /// im2col: unfolds convolution patches of one image into columns.
    /// order == 0 is NCHW, order == 1 is NHWC; anything else is fatal.
    /// Fast paths: (a) no padding and unit dilation, (b) symmetric padding;
    /// otherwise a generic gather loop runs.
    template <class T, int order>
    inline void Img2Col(const T *input, const int channels, const int height, const int width,
                        const int kernel_h, const int kernel_w,
                        const int dilation_h, const int dilation_w,
                        const int pad_t, const int pad_l, const int pad_b, const int pad_r,
                        const int stride_h, const int stride_w, T *output) {
        if (order == 0) {
            const int output_h = (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
            const int output_w = (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
            // padding = 0; dilation = 1; contiguous rows can be memcpy'd.
            if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 && pad_t == 0 && pad_b == 0) {
                for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
                    const auto nip = k / (kernel_h * kernel_w);   // channel index
                    const auto rest = k % (kernel_h * kernel_w);
                    const auto kh = rest / kernel_w;              // kernel row
                    const auto kw = rest % kernel_w;              // kernel col
                    auto* dst = output + nip * (kernel_h * kernel_w * output_h * output_w) + kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
                    const auto* src = input + nip * (height * width);
                    for (auto y = 0; y < output_h; y++) {
                        const auto iy = y * stride_h + kh;
                        const auto ix = kw;
                        if (stride_w == 1) {
                            // Unit stride: a whole output row is contiguous in the image.
                            memcpy(
                                    dst + (y * output_w),
                                    src + (iy * width + ix),
                                    sizeof(T) * output_w);
                        } else {
                            for (auto x = 0; x < output_w; x++) {
                                memcpy(
                                        dst + (y * output_w + x),
                                        src + (iy * width + ix + x * stride_w),
                                        sizeof(T));
                            }
                        }
                    }
                }
                return;
            }
            // equal padding
            if (pad_l == pad_r && pad_t == pad_b) {
                const int pad_h = pad_t;
                const int pad_w = pad_l;
                const int channel_size = height * width;
                for (int channel = channels; channel--; input += channel_size) {
                    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
                        for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
                            int input_row = -pad_h + kernel_row * dilation_h;
                            for (int output_rows = output_h; output_rows; output_rows--) {
                                if (!isLess(input_row, height)) {
                                    // Entire row lies in padding: emit zeros.
                                    for (int output_cols = output_w; output_cols; output_cols--) {
                                        *(output++) = 0;
                                    }
                                } else {
                                    int input_col = -pad_w + kernel_col * dilation_w;
                                    for (int output_col = output_w; output_col; output_col--) {
                                        if (isLess(input_col, width)) {
                                            *(output++) = input[input_row * width + input_col];
                                        } else {
                                            *(output++) = 0;
                                        }
                                        input_col += stride_w;
                                    }
                                }
                                input_row += stride_h;
                            }
                        }
                    }
                }
                return;
            }
            // base: generic gather with asymmetric padding / dilation.
            const int dkernel_h = dilation_h * (kernel_h - 1) + 1;  // dilated kernel extent
            const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
            int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
            int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
            int channels_col = channels * kernel_h * kernel_w;
            for (int c = 0; c < channels_col; ++c) {
                int w_offset = c % kernel_w;
                int h_offset = (c / kernel_w) % kernel_h;
                int c_im = c / kernel_h / kernel_w;
                for (int h = 0; h < height_col; ++h) {
                    for (int w = 0; w < width_col; ++w) {
                        int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
                        int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
                        if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) {
                            output[(c * height_col + h) * width_col + w] = input[(c_im * height + h_pad) * width + w_pad];
                        } else {
                            output[(c * height_col + h) * width_col + w] = 0;
                        }
                    }
                }
            }
        } else if (order == 1) {
            // NHWC: copy whole channel vectors per kernel tap.
            const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
            const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
            int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
            int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
            int h_pad = -pad_t;
            for (int h = 0; h < height_col; ++h) {
                int w_pad = -pad_l;
                for (int w = 0; w < width_col; ++w) {
                    for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h) {
                        for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w) {
                            if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
                                memcpy(output, input + (ih * width + iw) * channels, sizeof(T) * channels);
                            } else {
                                memset(output, 0, sizeof(T) * channels);
                            }
                            output += channels;
                        }
                    }
                    w_pad += stride_w;
                }
                h_pad += stride_h;
            }
        } else {
            Logger::Global()->Fatal("Img2Col do not support other image order except NCHW or NHWC \n");
        }
    };

    /// col2im: inverse of Img2Col -- scatters/accumulates columns back into a
    /// zeroed image buffer.  Same order/fast-path structure as Img2Col.
    template <class T, int order>
    inline void Col2Img(const T *input, const int channels, const int height, const int width,
                        const int kernel_h, const int kernel_w,
                        const int dilation_h, const int dilation_w,
                        const int pad_t, const int pad_l, const int pad_b, const int pad_r,
                        const int stride_h, const int stride_w, T *output) {
        memset(output, 0, height * width * channels* sizeof(T));
        if (order == 0) {
            const int output_h = (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
            const int output_w = (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
            if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 && pad_t == 0 && pad_b == 0) {
                for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
                    const auto nip = k / (kernel_h * kernel_w);   // channel index
                    const auto rest = k % (kernel_h * kernel_w);
                    const auto kh = rest / kernel_w;              // kernel row
                    const auto kw = rest % kernel_w;              // kernel col
                    const auto* dst = input + nip * (kernel_h * kernel_w * output_h * output_w) + kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
                    auto* src = output + nip * (height * width);
                    for (auto y = 0; y < output_h; y++) {
                        const auto iy = y * stride_h + kh;
                        const auto ix = kw;
                        if (stride_w == 1) {
                            auto offsrc
= src + (iy * width + ix);
                            const auto offdst = dst + (y * output_w);
                            // Unit stride: accumulate a whole contiguous row.
                            for (auto i = 0; i < output_w; ++i) {
                                offsrc[i] += offdst[i];
                            }
                        } else {
                            for (auto x = 0; x < output_w; x++) {
                                auto offsrc = src + (iy * width + ix + x * stride_w);
                                const auto offdst = dst + (y * output_w + x);
                                *offsrc += *offdst;
                            }
                        }
                    }
                }
                return;
            }
            if (pad_l == pad_r && pad_t == pad_b) {
                // From Intel, https://github.com/BVLC/caffe/pull/3536
                const int pad_h = pad_t;
                const int pad_w = pad_l;
                const int channel_size = height * width;
                for (int channel = channels; channel--; output += channel_size) {
                    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
                        for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
                            int input_row = -pad_h + kernel_row * dilation_h;
                            for (int output_rows = output_h; output_rows; output_rows--) {
                                if (!isLess(input_row, height)) {
                                    // Row lies entirely in padding: skip its column data.
                                    input += output_w;
                                } else {
                                    int input_col = -pad_w + kernel_col * dilation_w;
                                    for (int output_col = output_w; output_col; output_col--) {
                                        if (isLess(input_col, width)) {
                                            output[input_row * width + input_col] += *input;
                                        }
                                        input++;
                                        input_col += stride_w;
                                    }
                                }
                                input_row += stride_h;
                            }
                        }
                    }
                }
                return;
            }
            // Generic scatter-add with asymmetric padding / dilation.
            const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
            const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
            int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
            int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
            int channels_col = channels * kernel_h * kernel_w;
            for (int c = 0; c < channels_col; ++c) {
                int w_offset = c % kernel_w;
                int h_offset = (c / kernel_w) % kernel_h;
                int c_im = c / kernel_h / kernel_w;
                for (int h = 0; h < height_col; ++h) {
                    for (int w = 0; w < width_col; ++w) {
                        int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
                        int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
                        if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) {
                            output[(c_im * height + h_pad) * width + w_pad] += input[(c * height_col + h) * width_col + w];
                        }
                    }
                }
            }
        } else if (order == 1) {
            // NHWC: accumulate whole channel vectors per kernel tap.
            const int dkernel_h
= dilation_h * (kernel_h - 1) + 1;
            const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
            int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
            int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
            int h_pad = -pad_t;
            for (int h = 0; h < height_col; ++h) {
                int w_pad = -pad_l;
                for (int w = 0; w < width_col; ++w) {
                    for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h) {
                        for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w) {
                            if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
                                auto* data_im_patch = output + (ih * width + iw) * channels;
                                Add<T>(channels, data_im_patch, input, data_im_patch);
                            }
                            input += channels;
                        }
                    }
                    w_pad += stride_w;
                }
                h_pad += stride_h;
            }
        } else {
            Logger::Global()->Fatal("Col2Img do not support other image order except NCHW or NHWC \n");
        }
    };

    /// N-dimensional im2col (col2img == true scatters instead of gathering).
    /// imageShape[0] is channels, imageShape[1..N] the spatial extents;
    /// dataShape describes the column buffer.  Mirrors Caffe's im2col_nd.
    template <class T>
    inline void Img2ColNd(const T *input, const int *imageShape, const int *dataShape,
                          const int * kernel, const int *stride, const int * dilation,
                          const int * padding, const int N, T *output, bool col2img = false) {
        int kernel_size = 1;
        for (int i = 0; i < N; ++i) {
            kernel_size *= kernel[i];
        }
        const int channels_col = dataShape[0];
        std::vector<int> d_offset(N, 0);   // per-axis offset of this kernel tap
        std::vector<int> d_iter(N, 0);     // per-axis output position (odometer)
        for (int c_col = 0; c_col < channels_col; ++c_col) {
            // Decompose c_col into per-axis kernel offsets.
            int offset = c_col;
            for (int d_i = N - 1; d_i >= 0; --d_i) {
                if (d_i < N - 1) {
                    offset /= kernel[d_i + 1];
                }
                d_offset[d_i] = offset % kernel[d_i];
            }
            for (bool incremented = true; incremented;) {
                // Compute the flattened column and image indices for d_iter.
                int index_col = c_col;
                int index_im = c_col / kernel_size;
                bool is_padding = false;
                for (int d_i = 0; d_i < N; ++d_i) {
                    const int d = d_iter[d_i];
                    const int d_im = d * stride[d_i] - padding[d_i] + d_offset[d_i] * dilation[d_i];
                    is_padding |= d_im < 0 || d_im >= imageShape[d_i + 1];
                    index_col *= dataShape[d_i + 1];
                    index_col += d;
                    index_im *= imageShape[d_i + 1];
                    index_im += d_im;
                }
                if (!col2img) {
                    if (is_padding) {
                        output[index_col] = 0;
                    } else {
                        output[index_col] = input[index_im];
                    }
                } else if (!is_padding) { //
col2im output[index_im] += input[index_col]; } incremented = false; for (int d_i = N - 1; d_i >= 0; --d_i) { const int d_max = dataShape[d_i + 1]; if (d_iter[d_i] < d_max) { Logger::Global()->Fatal("Img2ColNd d_iter[%d] less then d_max\n", d_i); } if (d_iter[d_i] == d_max - 1) { d_iter[d_i] = 0; } else { // d_iter[d_i] < d_max - 1 ++d_iter[d_i]; incremented = true; break; } } } } }; template <class T> inline void Col2ImgNd(const T *input, const int *imageShape, const int *dataShape, const int * kernel, const int *stride, const int * dilation, const int * padding, const int N, T *output) { int imageSize = 1; for (int i = 0; i < N; ++i) { imageSize *= imageShape[i]; } memset(output, 0, sizeof(T) * imageSize); Img2ColNd(input, imageShape, dataShape, kernel, stride, dilation, padding, N, output, true); } template<class T> inline void img2col(const T *input, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; const int col_channels = input_channels * filter_width * filter_height; #ifdef USE_MP #pragma omp parallel for #endif for (int c = 0; c < col_channels; ++c) { int w_offset = c % filter_width; int h_offset = (c / filter_width) % filter_height; int c_im = c / filter_width / filter_height; for (int h = 0; h < output_height; ++h) { for (int w = 0; w < output_width; ++w) { int imRowIdx = h * stride_height + h_offset * dilation_height; int imColIdx = w * stride_width + w_offset * dilation_width; if ((imRowIdx - padding_height) < 0 || (imRowIdx - padding_height) >= input_height || (imColIdx - 
padding_width) < 0 || (imColIdx - padding_width) >= input_width) { output[(c * output_height + h) * output_width + w] = T(0); } else { imRowIdx += c_im * input_height - padding_height; imColIdx -= padding_width; output[(c * output_height + h) * output_width + w] = input[imRowIdx * input_width + imColIdx]; } } } } } template<class T> inline void col2img(T *input, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, const T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; const int col_channels = input_channels * filter_width * filter_height; #ifdef USE_MP #pragma omp parallel for #endif for (int c = 0; c < col_channels; ++c) { int w_offset = c % filter_width; int h_offset = (c / filter_width) % filter_height; int c_im = c / filter_width / filter_height; for (int h = 0; h < output_height; ++h) { for (int w = 0; w < output_width; ++w) { int imRowIdx = h * stride_height + h_offset * dilation_height; int imColIdx = w * stride_width + w_offset * dilation_width; imRowIdx -= padding_height; imColIdx -= padding_width; if (imRowIdx >= 0 && imRowIdx < input_height && imColIdx >= 0 && imColIdx < input_width) { int input_idx = (imRowIdx + c_im * input_height) * input_width + imColIdx; int output_idx = (c * output_height + h) * output_width + w; input[input_idx] += output[output_idx]; } } } } } template<class T> inline void NaiveConv(const T *input, const int batch_size, const int input_channels, const int input_width, const int input_height, const int stride_width, const int stride_height, const int padding_width, const int padding_height, const int 
filter_width, const int filter_height, const int dilation_width, const int dilation_height, const int output_channels, const T *filter, T *output) { const int output_width = (input_width + 2 * padding_width - (dilation_width * (filter_width - 1) + 1)) / stride_width + 1; const int output_height = (input_height + 2 * padding_height - (dilation_height * (filter_height - 1) + 1)) / stride_height + 1; #ifdef USE_MP #pragma omp parallel for #endif for (int batch = 0; batch < batch_size; ++batch) { for (int out_channel = 0; out_channel <output_channels ; ++out_channel) { for (int out_h = 0; out_h < output_height; ++out_h) { for (int out_w = 0; out_w < output_width; ++out_w) { const int inStartH = (out_h * stride_height) - padding_height; const int inStartW = (out_w * stride_width) - padding_width; T outValue = (T)0; for (int in_channel = 0; in_channel < input_channels; ++in_channel) { for (int filter_h = 0; filter_h < filter_height; ++filter_h) { for (int filter_w = 0; filter_w < filter_width; ++filter_w) { T inValue; const int inH = inStartH + filter_h; const int inW = inStartW + filter_w; if ((inH >= 0 && inH < input_height) && (inW >= 0 && inW < input_width)) { int offsetInput = batch * input_channels * input_height * input_width + in_channel * input_height * input_width + inH * input_width + inW; inValue = input[offsetInput]; } else { inValue = (T)0; } int offsetFilter = out_channel * input_channels * filter_height * filter_width + in_channel * filter_height * filter_width + filter_h * filter_width + filter_w; T filterValue = filter[offsetFilter]; outValue += (inValue * filterValue); } } } int offset = batch * output_channels * output_height * output_width + out_channel * output_height * output_width + out_h * output_width + out_w; output[offset] = outValue; } } } } } template <class T> inline void pooling2D(const T *input, const int batch_size, const int channel, const int input_width, const int input_height, const int output_width, const int output_height, const 
int stride_width, const int stride_height, const int padding_width, const int padding_height, const int filter_width, const int filter_height, const int dilation_width, const int dilation_height, T *output, int type = 0, T *mask = nullptr) { const int input_stride = input_height * input_width; const int output_stride = output_height * output_width; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < batch_size; ++i) { for (int c = 0; c < channel; ++c) { for (int ph = 0; ph < output_height; ++ph) { int hstart = ph * stride_height - padding_height; int hend = std::min(hstart + filter_height, input_height); hstart = std::max(hstart, 0); for (int pw = 0; pw < output_width; ++pw) { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + filter_width, input_width); wstart = std::max(wstart, 0); T ele; if (type == 0) { ele = input[hstart * input_width + wstart]; int index = hstart * input_width + wstart; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input[h * input_width + w]) { ele = input[h * input_width + w]; index = h * input_width + w; } } } output[ph * output_width + pw] = ele; if (mask != nullptr) { mask[ph * output_width + pw] = T(index); } } else if (type == 1) { ele = T(0); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { ele += input[h * input_width + w]; } } output[ph * output_width + pw] = ele / (hend * wend); } else { Logger::Global()->Fatal("not Implementation Pooling2D with other PoolType"); } } } input += input_stride; output += output_stride; if (mask != nullptr) { mask += output_stride; } } } } template <class T> inline void NHWC2NCHW(const T * input, const int num, const int inH, const int inW, const int inC, T *output) { #ifdef USE_MP #pragma omp parallel for #endif for (int n = 0; n < num; ++n) { for (int h = 0; h < inH; ++h) { for (int w = 0; w < inW; ++w) { for (int c = 0; c < inC; ++c) { output[((n * inC + c) * inH + h) * inW + w] = *(input++); } 
} } } } template <class T> inline void NCHW2NHWC(const T * input, const int num, const int inC, const int inH, const int inW, T *output) { #ifdef USE_MP #pragma omp parallel for #endif for (int n = 0; n < num; ++n) { for (int c = 0; c < inC; ++c) { for (int h = 0; h < inH; ++h) { for (int w = 0; w < inW; ++w) { output[((n * inH + h) * inW + w) * inC + c] = *(input++); } } } } } template <class I, class R> inline std::vector<R> Map(std::function<R(I)> fun, const std::vector<I>& vec) { std::vector<R> res; res.reserve(vec.size()); #ifdef USE_MP #pragma omp parallel for #endif for (auto& i : vec) { res.push_back(fun(i)); } return res; }; template <typename R, typename I> inline std::vector<R> Map(std::function<R(I)> fun, std::vector<I>&& vec) { return Map<R, I>(fun, vec); } template <typename I> inline I Reduce(std::function<I(const I&, const I&)> func, I initVal, const std::vector<I>& vec) { I res = initVal; #ifdef USE_MP #pragma omp parallel for #endif for (int i = 0; i < vec.size(); ++i) { res = func(res, vec.at(i)); } return res; } template <typename I> inline I Reduce(std::function<I(const I&, const I&)> func, const std::vector<I>& vec) { const std::vector<I> v(vec.begin() + 1, vec.end()); return Reduce(func, vec.at(0), v); } template <typename I> inline I Reduce(std::function<I(I&&, I&&)> func, I&& initVal, std::vector<I>&& vec) { #ifdef USE_MP #pragma omp parallel for #endif I res = std::move(initVal); for (int i = 0; i < vec.size(); ++i) { res = func(std::move(res), std::move(vec.at(i))); } return res; } template <typename I> I Reduce(std::function<I(I&&, I&&)> func, std::vector<I>&& vec) { #ifdef USE_MP #pragma omp parallel for #endif I res = std::move(vec.at(0)); for (int i = 1; i < vec.size(); ++i) { res = func(std::move(res), std::move(vec.at(i))); } return res; } template <typename R, typename I> inline R MapReduce(std::function<R(R, I, bool)> func, const std::vector<I>& vec) { #ifdef USE_MP #pragma omp parallel for #endif R res = func(R(), vec.at(0), 
true); for (int i = 1; i < vec.size(); ++i) { res = func(res, vec.at(i), false); } return res; } template< class T> inline std::vector<T> Filter(std::function<bool(const T)> func, const std::vector<T> &input) { std::vector<T> res; res.reserve(input.size()); for (const auto &i : input) { if (func(i)) { res.push_back(i); } } res.shrink_to_fit(); return res; } } #endif //MATRIX_MATH_H
pdgstrs.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief Solves a system of distributed linear equations A*X = B with a * general N-by-N matrix A using the LU factors computed previously. * * <pre> * -- Distributed SuperLU routine (version 6.1) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 15, 2008 * September 18, 2018 version 6.0 * February 8, 2019 version 6.1.1 * </pre> */ #include <math.h> #include "superlu_ddefs.h" #ifndef CACHELINE #define CACHELINE 64 /* bytes, Xeon Phi KNL, Cori haswell, Edision */ #endif /* * Sketch of the algorithm for L-solve: * ======================= * * Self-scheduling loop: * * while ( not finished ) { .. use message counter to control * * reveive a message; * * if ( message is Xk ) { * perform local block modifications into lsum[]; * lsum[i] -= L_i,k * X[k] * if all local updates done, Isend lsum[] to diagonal process; * * } else if ( message is LSUM ) { .. this must be a diagonal process * accumulate LSUM; * if ( all LSUM are received ) { * perform triangular solve for Xi; * Isend Xi down to the current process column; * perform local block modifications into lsum[]; * } * } * } * * * Auxiliary data structures: lsum[] / ilsum (pointer to lsum array) * ======================= * * lsum[] array (local) * + lsum has "nrhs" columns, row-wise is partitioned by supernodes * + stored by row blocks, column wise storage within a row block * + prepend a header recording the global block number. 
* * lsum[] ilsum[nsupers + 1] * * ----- * | | | <- header of size 2 --- * --------- <--------------------| | * | | | | | --- * | | | | | |-----------| | * | | | | | | --- * --------- | |-------| | * | | | <- header | | --- * --------- <--------| | |----| | * | | | | | | | --- * | | | | | | | * | | | | | | | * --------- | | * | | | <- header | | * --------- <------------| | * | | | | | | * | | | | | | * | | | | | | * --------- <---------------| */ /*#define ISEND_IRECV*/ /* * Function prototypes */ #ifdef _CRAY fortran void STRSM(_fcd, _fcd, _fcd, _fcd, int*, int*, double*, double*, int*, double*, int*); _fcd ftcs1; _fcd ftcs2; _fcd ftcs3; #endif /*! \brief * * <pre> * Purpose * ======= * Re-distribute B on the diagonal processes of the 2D process mesh. * * Note * ==== * This routine can only be called after the routine pxgstrs_init(), * in which the structures of the send and receive buffers are set up. * * Arguments * ========= * * B (input) double* * The distributed right-hand side matrix of the possibly * equilibrated system. * * m_loc (input) int (local) * The local row dimension of matrix B. * * nrhs (input) int (global) * Number of right-hand sides. * * ldb (input) int (local) * Leading dimension of matrix B. * * fst_row (input) int (global) * The row number of B's first row in the global matrix. * * ilsum (input) int* (global) * Starting position of each supernode in a full array. * * x (output) double* * The solution vector. It is valid only on the diagonal processes. * * ScalePermstruct (input) ScalePermstruct_t* * The data structure to store the scaling and permutation vectors * describing the transformations performed to the original matrix A. * * grid (input) gridinfo_t* * The 2D process mesh. * * SOLVEstruct (input) SOLVEstruct_t* * Contains the information for the communication during the * solution phase. 
* * Return value * ============ * </pre> */ int_t pdReDistribute_B_to_X(double *B, int_t m_loc, int nrhs, int_t ldb, int_t fst_row, int_t *ilsum, double *x, ScalePermstruct_t *ScalePermstruct, Glu_persist_t *Glu_persist, gridinfo_t *grid, SOLVEstruct_t *SOLVEstruct) { int *SendCnt, *SendCnt_nrhs, *RecvCnt, *RecvCnt_nrhs; int *sdispls, *sdispls_nrhs, *rdispls, *rdispls_nrhs; int *ptr_to_ibuf, *ptr_to_dbuf; int_t *perm_r, *perm_c; /* row and column permutation vectors */ int_t *send_ibuf, *recv_ibuf; double *send_dbuf, *recv_dbuf; int_t *xsup, *supno; int_t i, ii, irow, gbi, j, jj, k, knsupc, l, lk, nbrow; int p, procs; pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm; MPI_Request req_i, req_d, *req_send, *req_recv; MPI_Status status, *status_send, *status_recv; int Nreq_recv, Nreq_send, pp, pps, ppr; double t; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(grid->iam, "Enter pdReDistribute_B_to_X()"); #endif /* ------------------------------------------------------------ INITIALIZATION. ------------------------------------------------------------*/ perm_r = ScalePermstruct->perm_r; perm_c = ScalePermstruct->perm_c; procs = grid->nprow * grid->npcol; xsup = Glu_persist->xsup; supno = Glu_persist->supno; SendCnt = gstrs_comm->B_to_X_SendCnt; SendCnt_nrhs = gstrs_comm->B_to_X_SendCnt + procs; RecvCnt = gstrs_comm->B_to_X_SendCnt + 2*procs; RecvCnt_nrhs = gstrs_comm->B_to_X_SendCnt + 3*procs; sdispls = gstrs_comm->B_to_X_SendCnt + 4*procs; sdispls_nrhs = gstrs_comm->B_to_X_SendCnt + 5*procs; rdispls = gstrs_comm->B_to_X_SendCnt + 6*procs; rdispls_nrhs = gstrs_comm->B_to_X_SendCnt + 7*procs; ptr_to_ibuf = gstrs_comm->ptr_to_ibuf; ptr_to_dbuf = gstrs_comm->ptr_to_dbuf; /* ------------------------------------------------------------ NOW COMMUNICATE THE ACTUAL DATA. 
       ------------------------------------------------------------*/
    if(procs==1){ // faster memory copy when procs=1
        /* Single-process fast path: no communication is needed, so apply the
         * row permutation Pc*Pr and copy B straight into the X blocks. */
#ifdef _OPENMP
#pragma omp parallel default (shared)
#endif
	{
#ifdef _OPENMP
#pragma omp master
#endif
	    {
		// t = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp taskloop private (i,l,irow,k,j,knsupc) untied
#endif
		for (i = 0; i < m_loc; ++i) {
		    irow = perm_c[perm_r[i+fst_row]]; /* Row number in Pc*Pr*B */
		    k = BlockNum( irow );
		    knsupc = SuperSize( k );
		    l = X_BLK( k );
		    x[l - XK_H] = k;      /* Block number prepended in the header. */
		    irow = irow - FstBlockC(k); /* Relative row number in X-block */
		    RHS_ITERATE(j) {
			x[l + irow + j*knsupc] = B[i + j*ldb];
		    }
		}
	    }
	}
    }else{
        /* Multi-process path: pack (row index, values) per destination
         * process, then exchange point-to-point.  Buffer layout: the receive
         * areas are the tail segments of the single send allocations. */
	k = sdispls[procs-1] + SendCnt[procs-1]; /* Total number of sends */
	l = rdispls[procs-1] + RecvCnt[procs-1]; /* Total number of receives */
	if ( !(send_ibuf = intMalloc_dist(k + l)) )
	    ABORT("Malloc fails for send_ibuf[].");
	recv_ibuf = send_ibuf + k;
	if ( !(send_dbuf = doubleMalloc_dist((k + l)* (size_t)nrhs)) )
	    ABORT("Malloc fails for send_dbuf[].");
	recv_dbuf = send_dbuf + k * nrhs;
	if ( !(req_send = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) )
	    ABORT("Malloc fails for req_send[].");
	if ( !(req_recv = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) )
	    ABORT("Malloc fails for req_recv[].");
	if ( !(status_send = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) )
	    ABORT("Malloc fails for status_send[].");
	if ( !(status_recv = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) )
	    ABORT("Malloc fails for status_recv[].");
        /* Per-process write cursors into the packed send buffers; they start
         * at the send displacements and advance as rows are packed below. */
	for (p = 0; p < procs; ++p) {
	    ptr_to_ibuf[p] = sdispls[p];
	    ptr_to_dbuf[p] = sdispls[p] * nrhs;
	}
	/* Copy the row indices and values to the send buffer.
*/ // t = SuperLU_timer_(); for (i = 0, l = fst_row; i < m_loc; ++i, ++l) { irow = perm_c[perm_r[l]]; /* Row number in Pc*Pr*B */ gbi = BlockNum( irow ); p = PNUM( PROW(gbi,grid), PCOL(gbi,grid), grid ); /* Diagonal process */ k = ptr_to_ibuf[p]; send_ibuf[k] = irow; ++ptr_to_ibuf[p]; k = ptr_to_dbuf[p]; RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. */ send_dbuf[k++] = B[i + j*ldb]; } ptr_to_dbuf[p] += nrhs; } // t = SuperLU_timer_() - t; // printf(".. copy to send buffer time\t%8.4f\n", t); #if 0 #if 1 /* Communicate the (permuted) row indices. */ MPI_Alltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm); /* Communicate the numerical values. */ MPI_Alltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, MPI_DOUBLE, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, MPI_DOUBLE, grid->comm); #else /* Communicate the (permuted) row indices. */ MPI_Ialltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm, &req_i); /* Communicate the numerical values. 
*/ MPI_Ialltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, MPI_DOUBLE, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, MPI_DOUBLE, grid->comm, &req_d); MPI_Wait(&req_i,&status); MPI_Wait(&req_d,&status); #endif #endif MPI_Barrier( grid->comm ); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt[pps]>0){ MPI_Isend(&send_ibuf[sdispls[pps]], SendCnt[pps], mpi_int_t, pps, 0, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt[ppr]>0){ MPI_Irecv(&recv_ibuf[rdispls[ppr]], RecvCnt[ppr], mpi_int_t, ppr, 0, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt_nrhs[pps]>0){ MPI_Isend(&send_dbuf[sdispls_nrhs[pps]], SendCnt_nrhs[pps], MPI_DOUBLE, pps, 1, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt_nrhs[ppr]>0){ MPI_Irecv(&recv_dbuf[rdispls_nrhs[ppr]], RecvCnt_nrhs[ppr], MPI_DOUBLE, ppr, 1, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); /* ------------------------------------------------------------ Copy buffer into X on the diagonal processes. ------------------------------------------------------------*/ // t = SuperLU_timer_(); ii = 0; for (p = 0; p < procs; ++p) { jj = rdispls_nrhs[p]; for (i = 0; i < RecvCnt[p]; ++i) { /* Only the diagonal processes do this; the off-diagonal processes have 0 RecvCnt. */ irow = recv_ibuf[ii]; /* The permuted row index. */ k = BlockNum( irow ); knsupc = SuperSize( k ); lk = LBi( k, grid ); /* Local block number. 
*/ l = X_BLK( lk ); x[l - XK_H] = k; /* Block number prepended in the header. */ irow = irow - FstBlockC(k); /* Relative row number in X-block */ RHS_ITERATE(j) { x[l + irow + j*knsupc] = recv_dbuf[jj++]; } ++ii; } } // t = SuperLU_timer_() - t; // printf(".. copy to x time\t%8.4f\n", t); SUPERLU_FREE(send_ibuf); SUPERLU_FREE(send_dbuf); SUPERLU_FREE(req_send); SUPERLU_FREE(req_recv); SUPERLU_FREE(status_send); SUPERLU_FREE(status_recv); } #if ( DEBUGlevel>=1 ) CHECK_MALLOC(grid->iam, "Exit pdReDistribute_B_to_X()"); #endif return 0; } /* pdReDistribute_B_to_X */ /*! \brief * * <pre> * Purpose * ======= * Re-distribute X on the diagonal processes to B distributed on all * the processes. * * Note * ==== * This routine can only be called after the routine pxgstrs_init(), * in which the structures of the send and receive buffers are set up. * </pre> */ int_t pdReDistribute_X_to_B(int_t n, double *B, int_t m_loc, int_t ldb, int_t fst_row, int_t nrhs, double *x, int_t *ilsum, ScalePermstruct_t *ScalePermstruct, Glu_persist_t *Glu_persist, gridinfo_t *grid, SOLVEstruct_t *SOLVEstruct) { int_t i, ii, irow, j, jj, k, knsupc, nsupers, l, lk; int_t *xsup, *supno; int *SendCnt, *SendCnt_nrhs, *RecvCnt, *RecvCnt_nrhs; int *sdispls, *rdispls, *sdispls_nrhs, *rdispls_nrhs; int *ptr_to_ibuf, *ptr_to_dbuf; int_t *send_ibuf, *recv_ibuf; double *send_dbuf, *recv_dbuf; int_t *row_to_proc = SOLVEstruct->row_to_proc; /* row-process mapping */ pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm; int iam, p, q, pkk, procs; int_t num_diag_procs, *diag_procs; MPI_Request req_i, req_d, *req_send, *req_recv; MPI_Status status, *status_send, *status_recv; int Nreq_recv, Nreq_send, pp,pps,ppr; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(grid->iam, "Enter pdReDistribute_X_to_B()"); #endif /* ------------------------------------------------------------ INITIALIZATION. 
------------------------------------------------------------*/ xsup = Glu_persist->xsup; supno = Glu_persist->supno; nsupers = Glu_persist->supno[n-1] + 1; iam = grid->iam; procs = grid->nprow * grid->npcol; SendCnt = gstrs_comm->X_to_B_SendCnt; SendCnt_nrhs = gstrs_comm->X_to_B_SendCnt + procs; RecvCnt = gstrs_comm->X_to_B_SendCnt + 2*procs; RecvCnt_nrhs = gstrs_comm->X_to_B_SendCnt + 3*procs; sdispls = gstrs_comm->X_to_B_SendCnt + 4*procs; sdispls_nrhs = gstrs_comm->X_to_B_SendCnt + 5*procs; rdispls = gstrs_comm->X_to_B_SendCnt + 6*procs; rdispls_nrhs = gstrs_comm->X_to_B_SendCnt + 7*procs; ptr_to_ibuf = gstrs_comm->ptr_to_ibuf; ptr_to_dbuf = gstrs_comm->ptr_to_dbuf; if(procs==1){ //faster memory copy when procs=1 #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { // t = SuperLU_timer_(); #ifdef _OPENMP #pragma omp taskloop private (k,knsupc,lk,irow,l,i,j) untied #endif for (k = 0; k < nsupers; k++) { knsupc = SuperSize( k ); lk = LBi( k, grid ); /* Local block number */ irow = FstBlockC( k ); l = X_BLK( lk ); for (i = 0; i < knsupc; ++i) { RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. 
*/ B[irow-fst_row +i + j*ldb] = x[l + i + j*knsupc]; } } } } } }else{ k = sdispls[procs-1] + SendCnt[procs-1]; /* Total number of sends */ l = rdispls[procs-1] + RecvCnt[procs-1]; /* Total number of receives */ if ( !(send_ibuf = intMalloc_dist(k + l)) ) ABORT("Malloc fails for send_ibuf[]."); recv_ibuf = send_ibuf + k; if ( !(send_dbuf = doubleMalloc_dist((k + l)*nrhs)) ) ABORT("Malloc fails for send_dbuf[]."); if ( !(req_send = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) ) ABORT("Malloc fails for req_send[]."); if ( !(req_recv = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) ) ABORT("Malloc fails for req_recv[]."); if ( !(status_send = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) ) ABORT("Malloc fails for status_send[]."); if ( !(status_recv = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) ) ABORT("Malloc fails for status_recv[]."); recv_dbuf = send_dbuf + k * nrhs; for (p = 0; p < procs; ++p) { ptr_to_ibuf[p] = sdispls[p]; ptr_to_dbuf[p] = sdispls_nrhs[p]; } num_diag_procs = SOLVEstruct->num_diag_procs; diag_procs = SOLVEstruct->diag_procs; for (p = 0; p < num_diag_procs; ++p) { /* For all diagonal processes. */ pkk = diag_procs[p]; if ( iam == pkk ) { for (k = p; k < nsupers; k += num_diag_procs) { knsupc = SuperSize( k ); lk = LBi( k, grid ); /* Local block number */ irow = FstBlockC( k ); l = X_BLK( lk ); for (i = 0; i < knsupc; ++i) { #if 0 ii = inv_perm_c[irow]; /* Apply X <== Pc'*Y */ #else ii = irow; #endif q = row_to_proc[ii]; jj = ptr_to_ibuf[q]; send_ibuf[jj] = ii; jj = ptr_to_dbuf[q]; RHS_ITERATE(j) { /* RHS stored in row major in buffer. */ send_dbuf[jj++] = x[l + i + j*knsupc]; } ++ptr_to_ibuf[q]; ptr_to_dbuf[q] += nrhs; ++irow; } } } } /* ------------------------------------------------------------ COMMUNICATE THE (PERMUTED) ROW INDICES AND NUMERICAL VALUES. 
------------------------------------------------------------*/ #if 0 #if 1 MPI_Alltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm); MPI_Alltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs,MPI_DOUBLE, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, MPI_DOUBLE, grid->comm); #else MPI_Ialltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm,&req_i); MPI_Ialltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, MPI_DOUBLE, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, MPI_DOUBLE, grid->comm,&req_d); MPI_Wait(&req_i,&status); MPI_Wait(&req_d,&status); #endif #endif MPI_Barrier( grid->comm ); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt[pps]>0){ MPI_Isend(&send_ibuf[sdispls[pps]], SendCnt[pps], mpi_int_t, pps, 0, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt[ppr]>0){ MPI_Irecv(&recv_ibuf[rdispls[ppr]], RecvCnt[ppr], mpi_int_t, ppr, 0, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); // MPI_Barrier( grid->comm ); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt_nrhs[pps]>0){ MPI_Isend(&send_dbuf[sdispls_nrhs[pps]], SendCnt_nrhs[pps], MPI_DOUBLE, pps, 1, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt_nrhs[ppr]>0){ MPI_Irecv(&recv_dbuf[rdispls_nrhs[ppr]], RecvCnt_nrhs[ppr], MPI_DOUBLE, ppr, 1, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); // MPI_Barrier( grid->comm ); /* 
------------------------------------------------------------ COPY THE BUFFER INTO B. ------------------------------------------------------------*/ for (i = 0, k = 0; i < m_loc; ++i) { irow = recv_ibuf[i]; irow -= fst_row; /* Relative row number */ RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. */ B[irow + j*ldb] = recv_dbuf[k++]; } } SUPERLU_FREE(send_ibuf); SUPERLU_FREE(send_dbuf); SUPERLU_FREE(req_send); SUPERLU_FREE(req_recv); SUPERLU_FREE(status_send); SUPERLU_FREE(status_recv); } #if ( DEBUGlevel>=1 ) CHECK_MALLOC(grid->iam, "Exit pdReDistribute_X_to_B()"); #endif return 0; } /* pdReDistribute_X_to_B */ /*! \brief * * <pre> * Purpose * ======= * Compute the inverse of the diagonal blocks of the L and U * triangular matrices. * </pre> */ void pdCompute_Diag_Inv(int_t n, LUstruct_t *LUstruct,gridinfo_t *grid, SuperLUStat_t *stat, int *info) { #ifdef SLU_HAVE_LAPACK Glu_persist_t *Glu_persist = LUstruct->Glu_persist; LocalLU_t *Llu = LUstruct->Llu; double *lusup; double *recvbuf, *tempv; double *Linv;/* Inverse of diagonal block */ double *Uinv;/* Inverse of diagonal block */ int_t kcol, krow, mycol, myrow; int_t i, ii, il, j, jj, k, lb, ljb, lk, lptr, luptr; int_t nb, nlb,nlb_nodiag, nub, nsupers; int_t *xsup, *supno, *lsub, *usub; int_t *ilsum; /* Starting position of each supernode in lsum (LOCAL)*/ int Pc, Pr, iam; int knsupc, nsupr; int ldalsum; /* Number of lsum entries locally owned. */ int maxrecvsz, p, pi; int_t **Lrowind_bc_ptr; double **Lnzval_bc_ptr; double **Linv_bc_ptr; double **Uinv_bc_ptr; int INFO; double t; double one = 1.0; double zero = 0.0; #if ( PROFlevel>=1 ) t = SuperLU_timer_(); #endif #if ( PRNTlevel>=2 ) if ( grid->iam==0 ) { printf("computing inverse of diagonal blocks...\n"); fflush(stdout); } #endif /* * Initialization. 
*/ iam = grid->iam; Pc = grid->npcol; Pr = grid->nprow; myrow = MYROW( iam, grid ); mycol = MYCOL( iam, grid ); xsup = Glu_persist->xsup; supno = Glu_persist->supno; nsupers = supno[n-1] + 1; Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; Linv_bc_ptr = Llu->Linv_bc_ptr; Uinv_bc_ptr = Llu->Uinv_bc_ptr; Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */ Llu->inv = 1; /*--------------------------------------------------- * Compute inverse of L(lk,lk). *---------------------------------------------------*/ for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { lk = LBi( k, grid ); /* local block number */ kcol = PCOL( k, grid ); if ( mycol == kcol ) { /* diagonal process */ lk = LBj( k, grid ); /* Local block number, column-wise. */ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; Linv = Linv_bc_ptr[lk]; Uinv = Uinv_bc_ptr[lk]; nsupr = lsub[1]; knsupc = SuperSize( k ); for (j=0 ; j<knsupc; j++){ for (i=0 ; i<knsupc; i++){ Linv[j*knsupc+i] = zero; Uinv[j*knsupc+i] = zero; } } for (j=0 ; j<knsupc; j++){ Linv[j*knsupc+j] = one; for (i=j+1 ; i<knsupc; i++){ Linv[j*knsupc+i] = lusup[j*nsupr+i]; } for (i=0 ; i<j+1; i++){ Uinv[j*knsupc+i] = lusup[j*nsupr+i]; } } /* Triangular inversion */ dtrtri_("L","U",&knsupc,Linv,&knsupc,&INFO); dtrtri_("U","N",&knsupc,Uinv,&knsupc,&INFO); } /* end if (mycol === kcol) */ } /* end if (myrow === krow) */ } /* end fo k = ... nsupers */ #if ( PROFlevel>=1 ) if( grid->iam==0 ) { t = SuperLU_timer_() - t; printf(".. L-diag_inv time\t%10.5f\n", t); fflush(stdout); } #endif return; #endif /* SLU_HAVE_LAPACK */ } /*! \brief * * <pre> * Purpose * ======= * * PDGSTRS solves a system of distributed linear equations * A*X = B with a general N-by-N matrix A using the LU factorization * computed by PDGSTRF. 
* If the equilibration, and row and column permutations were performed, * the LU factorization was performed for A1 where * A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U * and the linear system solved is * A1 * Y = Pc*Pr*B1, where B was overwritten by B1 = diag(R)*B, and * the permutation to B1 by Pc*Pr is applied internally in this routine. * * Arguments * ========= * * n (input) int (global) * The order of the system of linear equations. * * LUstruct (input) LUstruct_t* * The distributed data structures storing L and U factors. * The L and U factors are obtained from PDGSTRF for * the possibly scaled and permuted matrix A. * See superlu_ddefs.h for the definition of 'LUstruct_t'. * A may be scaled and permuted into A1, so that * A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U * * grid (input) gridinfo_t* * The 2D process mesh. It contains the MPI communicator, the number * of process rows (NPROW), the number of process columns (NPCOL), * and my process rank. It is an input argument to all the * parallel routines. * Grid can be initialized by subroutine SUPERLU_GRIDINIT. * See superlu_defs.h for the definition of 'gridinfo_t'. * * B (input/output) double* * On entry, the distributed right-hand side matrix of the possibly * equilibrated system. That is, B may be overwritten by diag(R)*B. * On exit, the distributed solution matrix Y of the possibly * equilibrated system if info = 0, where Y = Pc*diag(C)^(-1)*X, * and X is the solution of the original system. * * m_loc (input) int (local) * The local row dimension of matrix B. * * fst_row (input) int (global) * The row number of B's first row in the global matrix. * * ldb (input) int (local) * The leading dimension of matrix B. * * nrhs (input) int (global) * Number of right-hand sides. * * SOLVEstruct (input) SOLVEstruct_t* (global) * Contains the information for the communication during the * solution phase. * * stat (output) SuperLUStat_t* * Record the statistics about the triangular solves. 
* See util.h for the definition of 'SuperLUStat_t'. * * info (output) int* * = 0: successful exit * < 0: if info = -i, the i-th argument had an illegal value * </pre> */ void pdgstrs(int_t n, LUstruct_t *LUstruct, ScalePermstruct_t *ScalePermstruct, gridinfo_t *grid, double *B, int_t m_loc, int_t fst_row, int_t ldb, int nrhs, SOLVEstruct_t *SOLVEstruct, SuperLUStat_t *stat, int *info) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; LocalLU_t *Llu = LUstruct->Llu; double alpha = 1.0; double beta = 0.0; double zero = 0.0; double *lsum; /* Local running sum of the updates to B-components */ double *x; /* X component at step k. */ /* NOTE: x and lsum are of same size. */ double *lusup, *dest; double *recvbuf, *recvbuf_on, *tempv, *recvbufall, *recvbuf_BC_fwd, *recvbuf0, *xin; double *rtemp, *rtemp_loc; /* Result of full matrix-vector multiply. */ double *Linv; /* Inverse of diagonal block */ double *Uinv; /* Inverse of diagonal block */ int *ipiv; int_t *leaf_send; int_t nleaf_send, nleaf_send_tmp; int_t *root_send; int_t nroot_send, nroot_send_tmp; int_t **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; /*-- Data structures used for broadcast and reduction trees. --*/ BcTree *LBtree_ptr = Llu->LBtree_ptr; RdTree *LRtree_ptr = Llu->LRtree_ptr; BcTree *UBtree_ptr = Llu->UBtree_ptr; RdTree *URtree_ptr = Llu->URtree_ptr; int_t *Urbs1; /* Number of row blocks in each block column of U. */ int_t *Urbs = Llu->Urbs; /* Number of row blocks in each block column of U. 
*/ Ucb_indptr_t **Ucb_indptr = Llu->Ucb_indptr;/* Vertical linked list pointing to Uindex[] */ int_t **Ucb_valptr = Llu->Ucb_valptr; /* Vertical linked list pointing to Unzval[] */ int_t kcol, krow, mycol, myrow; int_t i, ii, il, j, jj, k, kk, lb, ljb, lk, lib, lptr, luptr, gb, nn; int_t nb, nlb,nlb_nodiag, nub, nsupers, nsupers_j, nsupers_i,maxsuper; int_t *xsup, *supno, *lsub, *usub; int_t *ilsum; /* Starting position of each supernode in lsum (LOCAL)*/ int Pc, Pr, iam; int knsupc, nsupr, nprobe; int nbtree, nrtree, outcount; int ldalsum; /* Number of lsum entries locally owned. */ int maxrecvsz, p, pi; int_t **Lrowind_bc_ptr; double **Lnzval_bc_ptr; double **Linv_bc_ptr; double **Uinv_bc_ptr; double sum; MPI_Status status,status_on,statusx,statuslsum; pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm; SuperLUStat_t **stat_loc; double tmax; /*-- Counts used for L-solve --*/ int_t *fmod; /* Modification count for L-solve -- Count the number of local block products to be summed into lsum[lk]. */ int_t fmod_tmp; int_t **fsendx_plist = Llu->fsendx_plist; int_t nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */ int_t nfrecvx_buf=0; int_t *frecv; /* Count of lsum[lk] contributions to be received from processes in this row. It is only valid on the diagonal processes. */ int_t frecv_tmp; int_t nfrecvmod = 0; /* Count of total modifications to be recv'd. */ int_t nfrecv = 0; /* Count of total messages to be recv'd. */ int_t nbrecv = 0; /* Count of total messages to be recv'd. */ int_t nleaf = 0, nroot = 0; int_t nleaftmp = 0, nroottmp = 0; int_t msgsize; /*-- Counts used for U-solve --*/ int_t *bmod; /* Modification count for U-solve. */ int_t bmod_tmp; int_t **bsendx_plist = Llu->bsendx_plist; int_t nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */ int_t nbrecvx_buf=0; int_t *brecv; /* Count of modifications to be recv'd from processes in this row. */ int_t nbrecvmod = 0; /* Count of total modifications to be recv'd. 
*/ int_t flagx,flaglsum,flag; int_t *LBTree_active, *LRTree_active, *LBTree_finish, *LRTree_finish, *leafsups, *rootsups; int_t TAG; double t1_sol, t2_sol, t; #if ( DEBUGlevel>=2 ) int_t Ublocks = 0; #endif int_t gik,iklrow,fnz; int_t *mod_bit = Llu->mod_bit; /* flag contribution from each row block */ int INFO, pad; int_t tmpresult; // #if ( PROFlevel>=1 ) double t1, t2; float msg_vol = 0, msg_cnt = 0; // #endif int_t msgcnt[4]; /* Count the size of the message xfer'd in each buffer: * 0 : transferred in Lsub_buf[] * 1 : transferred in Lval_buf[] * 2 : transferred in Usub_buf[] * 3 : transferred in Uval_buf[] */ int iword = sizeof (int_t); int dword = sizeof (double); int Nwork; int_t procs = grid->nprow * grid->npcol; yes_no_t done; yes_no_t startforward; int nbrow; int_t ik, rel, idx_r, jb, nrbl, irow, pc,iknsupc; int_t lptr1_tmp, idx_i, idx_v,m; int_t ready; static int thread_id; yes_no_t empty; int_t sizelsum,sizertemp,aln_d,aln_i; aln_d = ceil(CACHELINE/(double)dword); aln_i = ceil(CACHELINE/(double)iword); int num_thread = 1; maxsuper = sp_ienv_dist(3); #ifdef _OPENMP #pragma omp threadprivate(thread_id) #endif #ifdef _OPENMP #pragma omp parallel default(shared) { if (omp_get_thread_num () == 0) { num_thread = omp_get_num_threads (); } thread_id = omp_get_thread_num (); } #endif #if ( PRNTlevel>=1 ) if( grid->iam==0 ) { printf("num_thread: %5d\n", num_thread); fflush(stdout); } #endif MPI_Barrier( grid->comm ); t1_sol = SuperLU_timer_(); t = SuperLU_timer_(); /* Test input parameters. */ *info = 0; if ( n < 0 ) *info = -1; else if ( nrhs < 0 ) *info = -9; if ( *info ) { pxerr_dist("PDGSTRS", grid, -*info); return; } /* * Initialization. 
*/ iam = grid->iam; Pc = grid->npcol; Pr = grid->nprow; myrow = MYROW( iam, grid ); mycol = MYCOL( iam, grid ); xsup = Glu_persist->xsup; supno = Glu_persist->supno; nsupers = supno[n-1] + 1; Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; Linv_bc_ptr = Llu->Linv_bc_ptr; Uinv_bc_ptr = Llu->Uinv_bc_ptr; nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */ stat->utime[SOL_COMM] = 0.0; stat->utime[SOL_GEMM] = 0.0; stat->utime[SOL_TRSM] = 0.0; stat->utime[SOL_TOT] = 0.0; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Enter pdgstrs()"); #endif stat->ops[SOLVE] = 0.0; Llu->SolveMsgSent = 0; /* Save the count to be altered so it can be used by subsequent call to PDGSTRS. */ if ( !(fmod = intMalloc_dist(nlb*aln_i)) ) ABORT("Malloc fails for fmod[]."); for (i = 0; i < nlb; ++i) fmod[i*aln_i] = Llu->fmod[i]; if ( !(frecv = intCalloc_dist(nlb)) ) ABORT("Calloc fails for frecv[]."); Llu->frecv = frecv; if ( !(leaf_send = intMalloc_dist((CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i)) ) ABORT("Malloc fails for leaf_send[]."); nleaf_send=0; if ( !(root_send = intMalloc_dist((CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i)) ) ABORT("Malloc fails for root_send[]."); nroot_send=0; #ifdef _CRAY ftcs1 = _cptofcd("L", strlen("L")); ftcs2 = _cptofcd("N", strlen("N")); ftcs3 = _cptofcd("U", strlen("U")); #endif /* Obtain ilsum[] and ldalsum for process column 0. */ ilsum = Llu->ilsum; ldalsum = Llu->ldalsum; /* Allocate working storage. 
*/ knsupc = sp_ienv_dist(3); maxrecvsz = knsupc * nrhs + SUPERLU_MAX( XK_H, LSUM_H ); sizelsum = (((size_t)ldalsum)*nrhs + nlb*LSUM_H); sizelsum = ((sizelsum + (aln_d - 1)) / aln_d) * aln_d; #ifdef _OPENMP if ( !(lsum = (double*)SUPERLU_MALLOC(sizelsum*num_thread * sizeof(double)))) ABORT("Malloc fails for lsum[]."); #pragma omp parallel default(shared) private(ii) { for (ii=0; ii<sizelsum; ii++) lsum[thread_id*sizelsum+ii]=zero; } #else if ( !(lsum = (double*)SUPERLU_MALLOC(sizelsum*num_thread * sizeof(double)))) ABORT("Malloc fails for lsum[]."); for ( ii=0; ii < sizelsum*num_thread; ii++ ) lsum[ii]=zero; #endif if ( !(x = doubleCalloc_dist(ldalsum * nrhs + nlb * XK_H)) ) ABORT("Calloc fails for x[]."); sizertemp=ldalsum * nrhs; sizertemp = ((sizertemp + (aln_d - 1)) / aln_d) * aln_d; if ( !(rtemp = (double*)SUPERLU_MALLOC((sizertemp*num_thread + 1) * sizeof(double))) ) ABORT("Malloc fails for rtemp[]."); #ifdef _OPENMP #pragma omp parallel default(shared) private(ii) { for ( ii=0; ii<sizertemp; ii++ ) rtemp[thread_id*sizertemp+ii]=zero; } #else for ( ii=0; ii<sizertemp*num_thread; ii++ ) rtemp[ii]=zero; #endif if ( !(stat_loc = (SuperLUStat_t**) SUPERLU_MALLOC(num_thread*sizeof(SuperLUStat_t*))) ) ABORT("Malloc fails for stat_loc[]."); for ( i=0; i<num_thread; i++) { stat_loc[i] = (SuperLUStat_t*)SUPERLU_MALLOC(sizeof(SuperLUStat_t)); PStatInit(stat_loc[i]); } #if ( DEBUGlevel>=2 ) /* Dump the L factor using matlab triple-let format. */ dDumpLblocks(iam, nsupers, grid, Glu_persist, Llu); #endif /*--------------------------------------------------- * Forward solve Ly = b. *---------------------------------------------------*/ /* Redistribute B into X on the diagonal processes. */ pdReDistribute_B_to_X(B, m_loc, nrhs, ldb, fst_row, ilsum, x, ScalePermstruct, Glu_persist, grid, SOLVEstruct); #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. 
B to X redistribute time\t%8.4f\n", t); fflush(stdout); t = SuperLU_timer_(); #endif /* Set up the headers in lsum[]. */ #ifdef _OPENMP #pragma omp simd lastprivate(krow,lk,il) #endif for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { lk = LBi( k, grid ); /* Local block number. */ il = LSUM_BLK( lk ); lsum[il - LSUM_H] = k; /* Block number prepended in the header. */ } } /* --------------------------------------------------------- Initialize the async Bcast trees on all processes. --------------------------------------------------------- */ nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */ nbtree = 0; for (lk=0;lk<nsupers_j;++lk){ if(LBtree_ptr[lk]!=NULL){ // printf("LBtree_ptr lk %5d\n",lk); if(BcTree_IsRoot(LBtree_ptr[lk],'d')==NO){ nbtree++; if(BcTree_getDestCount(LBtree_ptr[lk],'d')>0)nfrecvx_buf++; } BcTree_allocateRequest(LBtree_ptr[lk],'d'); } } nsupers_i = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ if ( !( leafsups = (int_t*)intCalloc_dist(nsupers_i)) ) ABORT("Calloc fails for leafsups."); nrtree = 0; nleaf=0; nfrecvmod=0; if(procs==1){ for (lk=0;lk<nsupers_i;++lk){ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ if (fmod[lk*aln_i]==0){ leafsups[nleaf]=gb; ++nleaf; } } } }else{ for (lk=0;lk<nsupers_i;++lk){ if(LRtree_ptr[lk]!=NULL){ nrtree++; RdTree_allocateRequest(LRtree_ptr[lk],'d'); frecv[lk] = RdTree_GetDestCount(LRtree_ptr[lk],'d'); nfrecvmod += frecv[lk]; }else{ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ kcol = PCOL( gb, grid ); if(mycol==kcol) { /* Diagonal process */ if (fmod[lk*aln_i]==0){ leafsups[nleaf]=gb; ++nleaf; } } } } } } #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < nlb; ++i) fmod[i*aln_i] += frecv[i]; if ( !(recvbuf_BC_fwd = (double*)SUPERLU_MALLOC(maxrecvsz*(nfrecvx+1) * sizeof(double))) ) // this needs to be optimized for 1D row mapping ABORT("Malloc fails for recvbuf_BC_fwd[]."); nfrecvx_buf=0; 
log_memory(nlb*aln_i*iword+nlb*iword+(CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*2.0*iword+ nsupers_i*iword + sizelsum*num_thread * dword + (ldalsum * nrhs + nlb * XK_H) *dword + (sizertemp*num_thread + 1)*dword+maxrecvsz*(nfrecvx+1)*dword, stat); //account for fmod, frecv, leaf_send, root_send, leafsups, recvbuf_BC_fwd , lsum, x, rtemp #if ( DEBUGlevel>=2 ) printf("(%2d) nfrecvx %4d, nfrecvmod %4d, nleaf %4d\n, nbtree %4d\n, nrtree %4d\n", iam, nfrecvx, nfrecvmod, nleaf, nbtree, nrtree); fflush(stdout); #endif #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. Setup L-solve time\t%8.4f\n", t); fflush(stdout); MPI_Barrier( grid->comm ); t = SuperLU_timer_(); #endif #if ( VAMPIR>=1 ) // VT_initialize(); VT_traceon(); #endif #ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif /* --------------------------------------------------------- Solve the leaf nodes first by all the diagonal processes. --------------------------------------------------------- */ #if ( DEBUGlevel>=2 ) printf("(%2d) nleaf %4d\n", iam, nleaf); fflush(stdout); #endif #ifdef _OPENMP #pragma omp parallel default (shared) #endif { { if (Llu->inv == 1) { /* Diagonal is inverted. */ #ifdef _OPENMP #pragma omp for firstprivate(nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Linv,i,lib,rtemp_loc,nleaf_send_tmp) nowait #endif for (jj=0;jj<nleaf;jj++){ k=leafsups[jj]; // #ifdef _OPENMP // #pragma omp task firstprivate (k,nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,knsupc,lk,luptr,lsub,nsupr,lusup,thread_id,t1,t2,Linv,i,lib,rtemp_loc) // #endif { #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; Linv = Linv_bc_ptr[lk]; #ifdef _CRAY SGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ x[ii+i] = rtemp_loc[i]; } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_l: %f\n",x[ii+i]); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += knsupc * (knsupc - 1) * nrhs; // --nleaf; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(LBtree_ptr[lk]!=NULL){ lib = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); #ifdef _OPENMP #pragma omp atomic capture #endif nleaf_send_tmp = ++nleaf_send; leaf_send[(nleaf_send_tmp-1)*aln_i] = lk; // BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],'d'); } } } } else { /* Diagonal is not inverted. */ #ifdef _OPENMP #pragma omp for firstprivate (nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Linv,i,lib,rtemp_loc,nleaf_send_tmp) nowait #endif for (jj=0;jj<nleaf;jj++) { k=leafsups[jj]; { #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; #ifdef _CRAY STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_l: %f\n",x[ii+i]); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += knsupc * (knsupc - 1) * nrhs; // --nleaf; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if (LBtree_ptr[lk]!=NULL) { lib = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); #ifdef _OPENMP #pragma omp atomic capture #endif nleaf_send_tmp = ++nleaf_send; leaf_send[(nleaf_send_tmp-1)*aln_i] = lk; } } /* end a block */ } /* end for jj ... */ } /* end else ... diagonal is not invedted */ } } jj=0; #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop private (k,ii,lk) num_tasks(num_thread*8) nogroup #endif for (jj=0;jj<nleaf;jj++){ k=leafsups[jj]; { /* Diagonal process */ lk = LBi( k, grid ); ii = X_BLK( lk ); /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ dlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, k, fmod, xsup, grid, Llu, stat_loc, leaf_send, &nleaf_send,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } // } /* if diagonal process ... */ } /* for k ... */ } } for (i=0;i<nleaf_send;i++){ lk = leaf_send[i*aln_i]; if(lk>=0){ // this is a bcast forwarding gb = mycol+lk*grid->npcol; /* not sure */ lib = LBi( gb, grid ); /* Local block number, row-wise. 
*/ ii = X_BLK( lib ); BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'d')*nrhs+XK_H,'d'); }else{ // this is a reduce forwarding lk = -lk - 1; il = LSUM_BLK( lk ); RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(LRtree_ptr[lk],'d')*nrhs+LSUM_H,'d'); } } #ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif /* ----------------------------------------------------------- Compute the internal nodes asynchronously by all processes. ----------------------------------------------------------- */ #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { for ( nfrecv =0; nfrecv<nfrecvx+nfrecvmod;nfrecv++) { /* While not finished. */ thread_id = 0; #if ( PROFlevel>=1 ) TIC(t1); // msgcnt[1] = maxrecvsz; #endif recvbuf0 = &recvbuf_BC_fwd[nfrecvx_buf*maxrecvsz]; /* Receive a message. */ MPI_Recv( recvbuf0, maxrecvsz, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status ); // MPI_Irecv(recvbuf0,maxrecvsz,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,grid->comm,&req); // ready=0; // while(ready==0){ // MPI_Test(&req,&ready,&status); // #pragma omp taskyield // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_COMM] += t2; msg_cnt += 1; msg_vol += maxrecvsz * dword; #endif { k = *recvbuf0; #if ( DEBUGlevel>=2 ) printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG); #endif if(status.MPI_TAG==BC_L){ // --nfrecvx; nfrecvx_buf++; { lk = LBj( k, grid ); /* local block number */ if(BcTree_getDestCount(LBtree_ptr[lk],'d')>0){ BcTree_forwardMessageSimple(LBtree_ptr[lk],recvbuf0,BcTree_GetMsgSize(LBtree_ptr[lk],'d')*nrhs+XK_H,'d'); // nfrecvx_buf++; } /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if ( lsub ) { krow = PROW( k, grid ); if(myrow==krow){ nb = lsub[0] - 1; knsupc = SuperSize( k ); ii = X_BLK( LBi( k, grid ) ); xin = &x[ii]; }else{ nb = lsub[0]; knsupc = SuperSize( k ); xin = &recvbuf0[XK_H] ; } dlsum_fmod_inv_master(lsum, x, xin, rtemp, nrhs, knsupc, k, fmod, nb, xsup, grid, Llu, stat_loc,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } /* if lsub */ } }else if(status.MPI_TAG==RD_L){ // --nfrecvmod; lk = LBi( k, grid ); /* Local block number, row-wise. */ knsupc = SuperSize( k ); tempv = &recvbuf0[LSUM_H]; il = LSUM_BLK( lk ); RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) lsum[i + il + j*knsupc + thread_id*sizelsum] += tempv[i + j*knsupc]; } // #ifdef _OPENMP // #pragma omp atomic capture // #endif fmod_tmp=--fmod[lk*aln_i]; { thread_id = 0; rtemp_loc = &rtemp[sizertemp* thread_id]; if ( fmod_tmp==0 ) { if(RdTree_IsRoot(LRtree_ptr[lk],'d')==YES){ // ii = X_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) lsum[il + jj ] += lsum[il + jj + ii*sizelsum]; ii = X_BLK( lk ); RHS_ITERATE(j) #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) x[i + ii + j*knsupc] += lsum[i + il + j*knsupc ]; // fmod[lk] = -1; /* Do not solve X[k] in the future. */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; #if ( PROFlevel>=1 ) TIC(t1); #endif if(Llu->inv == 1){ Linv = Linv_bc_ptr[lk]; #ifdef _CRAY SGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ x[ii+i] = rtemp_loc[i]; } } else{ #ifdef _CRAY STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += knsupc * (knsupc - 1) * nrhs; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(LBtree_ptr[lk]!=NULL){ BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'d')*nrhs+XK_H,'d'); } /* * Perform local block modifications. */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if ( lsub ) { krow = PROW( k, grid ); nb = lsub[0] - 1; knsupc = SuperSize( k ); ii = X_BLK( LBi( k, grid ) ); xin = &x[ii]; dlsum_fmod_inv_master(lsum, x, xin, rtemp, nrhs, knsupc, k, fmod, nb, xsup, grid, Llu, stat_loc,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } /* if lsub */ // } }else{ il = LSUM_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) lsum[il + jj] += lsum[il + jj + ii*sizelsum]; RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il-LSUM_H],RdTree_GetMsgSize(LRtree_ptr[lk],'d')*nrhs+LSUM_H,'d'); } } } } /* check Tag */ } } /* while not finished ... */ } } #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; stat->utime[SOL_TOT] += t; if ( !iam ) { printf(".. L-solve time\t%8.4f\n", t); fflush(stdout); } MPI_Reduce (&t, &tmax, 1, MPI_DOUBLE, MPI_MAX, 0, grid->comm); if ( !iam ) { printf(".. L-solve time (MAX) \t%8.4f\n", tmax); fflush(stdout); } t = SuperLU_timer_(); #endif #if ( DEBUGlevel==2 ) { printf("(%d) .. 
After L-solve: y =\n", iam); for (i = 0, k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); kcol = PCOL( k, grid ); if ( myrow == krow && mycol == kcol ) { /* Diagonal process */ knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); for (j = 0; j < knsupc; ++j) printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+j, x[ii+j]); fflush(stdout); } MPI_Barrier( grid->comm ); } } #endif SUPERLU_FREE(fmod); SUPERLU_FREE(frecv); SUPERLU_FREE(leaf_send); SUPERLU_FREE(leafsups); SUPERLU_FREE(recvbuf_BC_fwd); log_memory(-nlb*aln_i*iword-nlb*iword-(CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*iword- nsupers_i*iword -maxrecvsz*(nfrecvx+1)*dword, stat); //account for fmod, frecv, leaf_send, leafsups, recvbuf_BC_fwd for (lk=0;lk<nsupers_j;++lk){ if(LBtree_ptr[lk]!=NULL){ // if(BcTree_IsRoot(LBtree_ptr[lk],'d')==YES){ BcTree_waitSendRequest(LBtree_ptr[lk],'d'); // } // deallocate requests here } } for (lk=0;lk<nsupers_i;++lk){ if(LRtree_ptr[lk]!=NULL){ RdTree_waitSendRequest(LRtree_ptr[lk],'d'); // deallocate requests here } } MPI_Barrier( grid->comm ); #if ( VAMPIR>=1 ) VT_traceoff(); VT_finalize(); #endif /*--------------------------------------------------- * Back solve Ux = y. * * The Y components from the forward solve is already * on the diagonal processes. *---------------------------------------------------*/ /* Save the count to be altered so it can be used by subsequent call to PDGSTRS. */ if ( !(bmod = intMalloc_dist(nlb*aln_i)) ) ABORT("Malloc fails for bmod[]."); for (i = 0; i < nlb; ++i) bmod[i*aln_i] = Llu->bmod[i]; if ( !(brecv = intCalloc_dist(nlb)) ) ABORT("Calloc fails for brecv[]."); Llu->brecv = brecv; k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb; /* Re-initialize lsum to zero. Each block header is already in place. */ #ifdef _OPENMP #pragma omp parallel default(shared) private(ii) { for(ii=0;ii<sizelsum;ii++) lsum[thread_id*sizelsum+ii]=zero; } /* Set up the headers in lsum[]. 
*/ #ifdef _OPENMP #pragma omp simd lastprivate(krow,lk,il) #endif for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { lk = LBi( k, grid ); /* Local block number. */ il = LSUM_BLK( lk ); lsum[il - LSUM_H] = k; /* Block number prepended in the header. */ } } #else for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { knsupc = SuperSize( k ); lk = LBi( k, grid ); il = LSUM_BLK( lk ); dest = &lsum[il]; for (jj = 0; jj < num_thread; ++jj) { RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) dest[i + j*knsupc + jj*sizelsum] = zero; } } } } #endif #if ( DEBUGlevel>=2 ) for (p = 0; p < Pr*Pc; ++p) { if (iam == p) { printf("(%2d) .. Ublocks %d\n", iam, Ublocks); for (lb = 0; lb < nub; ++lb) { printf("(%2d) Local col %2d: # row blocks %2d\n", iam, lb, Urbs[lb]); if ( Urbs[lb] ) { for (i = 0; i < Urbs[lb]; ++i) printf("(%2d) .. row blk %2d:\ lbnum %d, indpos %d, valpos %d\n", iam, i, Ucb_indptr[lb][i].lbnum, Ucb_indptr[lb][i].indpos, Ucb_valptr[lb][i]); } } } MPI_Barrier( grid->comm ); } for (p = 0; p < Pr*Pc; ++p) { if ( iam == p ) { printf("\n(%d) bsendx_plist[][]", iam); for (lb = 0; lb < nub; ++lb) { printf("\n(%d) .. local col %2d: ", iam, lb); for (i = 0; i < Pr; ++i) printf("%4d", bsendx_plist[lb][i]); } printf("\n"); } MPI_Barrier( grid->comm ); } #endif /* DEBUGlevel */ /* --------------------------------------------------------- Initialize the async Bcast trees on all processes. 
--------------------------------------------------------- */ nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */ nbtree = 0; for (lk=0;lk<nsupers_j;++lk){ if(UBtree_ptr[lk]!=NULL){ // printf("UBtree_ptr lk %5d\n",lk); if(BcTree_IsRoot(UBtree_ptr[lk],'d')==NO){ nbtree++; if(BcTree_getDestCount(UBtree_ptr[lk],'d')>0)nbrecvx_buf++; } BcTree_allocateRequest(UBtree_ptr[lk],'d'); } } nsupers_i = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ if ( !( rootsups = (int_t*)intCalloc_dist(nsupers_i)) ) ABORT("Calloc fails for rootsups."); nrtree = 0; nroot=0; for (lk=0;lk<nsupers_i;++lk){ if(URtree_ptr[lk]!=NULL){ // printf("here lk %5d myid %5d\n",lk,iam); // fflush(stdout); nrtree++; RdTree_allocateRequest(URtree_ptr[lk],'d'); brecv[lk] = RdTree_GetDestCount(URtree_ptr[lk],'d'); nbrecvmod += brecv[lk]; }else{ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ kcol = PCOL( gb, grid ); if(mycol==kcol) { /* Diagonal process */ if (bmod[lk*aln_i]==0){ rootsups[nroot]=gb; ++nroot; } } } } } #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < nlb; ++i) bmod[i*aln_i] += brecv[i]; // for (i = 0; i < nlb; ++i)printf("bmod[i]: %5d\n",bmod[i]); if ( !(recvbuf_BC_fwd = (double*)SUPERLU_MALLOC(maxrecvsz*(nbrecvx+1) * sizeof(double))) ) // this needs to be optimized for 1D row mapping ABORT("Malloc fails for recvbuf_BC_fwd[]."); nbrecvx_buf=0; log_memory(nlb*aln_i*iword+nlb*iword + nsupers_i*iword + maxrecvsz*(nbrecvx+1)*dword, stat); //account for bmod, brecv, rootsups, recvbuf_BC_fwd #if ( DEBUGlevel>=2 ) printf("(%2d) nbrecvx %4d, nbrecvmod %4d, nroot %4d\n, nbtree %4d\n, nrtree %4d\n", iam, nbrecvx, nbrecvmod, nroot, nbtree, nrtree); fflush(stdout); #endif #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. Setup U-solve time\t%8.4f\n", t); fflush(stdout); MPI_Barrier( grid->comm ); t = SuperLU_timer_(); #endif /* * Solve the roots first by all the diagonal processes. 
*/ #if ( DEBUGlevel>=2 ) printf("(%2d) nroot %4d\n", iam, nroot); fflush(stdout); #endif #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop firstprivate (nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,jj,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Uinv,i,lib,rtemp_loc,nroot_send_tmp) nogroup #endif for (jj=0;jj<nroot;jj++){ k=rootsups[jj]; #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); /* Local block number, row-wise. */ // bmod[lk] = -1; /* Do not solve X[k] in the future. */ ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise */ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; if(Llu->inv == 1){ Uinv = Uinv_bc_ptr[lk]; #ifdef _CRAY SGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ x[ii+i] = rtemp_loc[i]; } }else{ #ifdef _CRAY STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) dtrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else dtrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_u: %f\n",x[ii+i]); // fflush(stdout); // } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x: %f\n",x[ii+i]); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += knsupc * (knsupc + 1) 
* nrhs; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(UBtree_ptr[lk]!=NULL){ #ifdef _OPENMP #pragma omp atomic capture #endif nroot_send_tmp = ++nroot_send; root_send[(nroot_send_tmp-1)*aln_i] = lk; } } /* for k ... */ } } #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop private (ii,jj,k,lk) nogroup #endif for (jj=0;jj<nroot;jj++){ k=rootsups[jj]; lk = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise */ /* * Perform local block modifications: lsum[i] -= U_i,k * X[k] */ if ( Urbs[lk] ) dlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, root_send, &nroot_send, sizelsum,sizertemp,thread_id,num_thread); } /* for k ... */ } } for (i=0;i<nroot_send;i++){ lk = root_send[(i)*aln_i]; if(lk>=0){ // this is a bcast forwarding gb = mycol+lk*grid->npcol; /* not sure */ lib = LBi( gb, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); BcTree_forwardMessageSimple(UBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk],'d')*nrhs+XK_H,'d'); }else{ // this is a reduce forwarding lk = -lk - 1; il = LSUM_BLK( lk ); RdTree_forwardMessageSimple(URtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(URtree_ptr[lk],'d')*nrhs+LSUM_H,'d'); } } /* * Compute the internal nodes asychronously by all processes. */ #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif for ( nbrecv =0; nbrecv<nbrecvx+nbrecvmod;nbrecv++) { /* While not finished. */ // printf("iam %4d nbrecv %4d nbrecvx %4d nbrecvmod %4d\n", iam, nbrecv, nbrecvxnbrecvmod); // fflush(stdout); thread_id = 0; #if ( PROFlevel>=1 ) TIC(t1); #endif recvbuf0 = &recvbuf_BC_fwd[nbrecvx_buf*maxrecvsz]; /* Receive a message. 
*/ MPI_Recv( recvbuf0, maxrecvsz, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status ); #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_COMM] += t2; msg_cnt += 1; msg_vol += maxrecvsz * dword; #endif k = *recvbuf0; #if ( DEBUGlevel>=2 ) printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG); fflush(stdout); #endif if(status.MPI_TAG==BC_U){ // --nfrecvx; nbrecvx_buf++; lk = LBj( k, grid ); /* local block number */ if(BcTree_getDestCount(UBtree_ptr[lk],'d')>0){ BcTree_forwardMessageSimple(UBtree_ptr[lk],recvbuf0,BcTree_GetMsgSize(UBtree_ptr[lk],'d')*nrhs+XK_H,'d'); // nfrecvx_buf++; } /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ lk = LBj( k, grid ); /* Local block number, column-wise. */ dlsum_bmod_inv_master(lsum, x, &recvbuf0[XK_H], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, sizelsum,sizertemp,thread_id,num_thread); }else if(status.MPI_TAG==RD_U){ lk = LBi( k, grid ); /* Local block number, row-wise. */ knsupc = SuperSize( k ); tempv = &recvbuf0[LSUM_H]; il = LSUM_BLK( lk ); RHS_ITERATE(j) { #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) lsum[i + il + j*knsupc + thread_id*sizelsum] += tempv[i + j*knsupc]; } // #ifdef _OPENMP // #pragma omp atomic capture // #endif bmod_tmp=--bmod[lk*aln_i]; thread_id = 0; rtemp_loc = &rtemp[sizertemp* thread_id]; if ( bmod_tmp==0 ) { if(RdTree_IsRoot(URtree_ptr[lk],'d')==YES){ knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) lsum[il+ jj ] += lsum[il + jj + ii*sizelsum]; ii = X_BLK( lk ); RHS_ITERATE(j) #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) x[i + ii + j*knsupc] += lsum[i + il + j*knsupc ]; lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; if(Llu->inv == 1){ Uinv = Uinv_bc_ptr[lk]; #ifdef _CRAY SGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else dgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ x[ii+i] = rtemp_loc[i]; } }else{ #ifdef _CRAY STRSM(ftcs1, ftcs3, ftcs2, ftcs2, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) dtrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else dtrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += knsupc * (knsupc + 1) * nrhs; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(UBtree_ptr[lk]!=NULL){ BcTree_forwardMessageSimple(UBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk],'d')*nrhs+XK_H,'d'); } /* * Perform local block modifications: * lsum[i] -= U_i,k * X[k] */ if ( Urbs[lk] ) dlsum_bmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, sizelsum,sizertemp,thread_id,num_thread); }else{ il = LSUM_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) lsum[il+ jj ] += lsum[il + jj + ii*sizelsum]; RdTree_forwardMessageSimple(URtree_ptr[lk],&lsum[il-LSUM_H],RdTree_GetMsgSize(URtree_ptr[lk],'d')*nrhs+LSUM_H,'d'); } } } } /* while not finished ... 
*/ } #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; stat->utime[SOL_TOT] += t; if ( !iam ) printf(".. U-solve time\t%8.4f\n", t); MPI_Reduce (&t, &tmax, 1, MPI_DOUBLE, MPI_MAX, 0, grid->comm); if ( !iam ) { printf(".. U-solve time (MAX) \t%8.4f\n", tmax); fflush(stdout); } t = SuperLU_timer_(); #endif #if ( DEBUGlevel>=2 ) { double *x_col; int diag; printf("\n(%d) .. After U-solve: x (ON DIAG PROCS) = \n", iam); ii = 0; for (k = 0; k < nsupers; ++k) { knsupc = SuperSize( k ); krow = PROW( k, grid ); kcol = PCOL( k, grid ); diag = PNUM( krow, kcol, grid); if ( iam == diag ) { /* Diagonal process. */ lk = LBi( k, grid ); jj = X_BLK( lk ); x_col = &x[jj]; RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) { /* X stored in blocks */ printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+i, x_col[i]); } x_col += knsupc; } } ii += knsupc; } /* for k ... */ } #endif pdReDistribute_X_to_B(n, B, m_loc, ldb, fst_row, nrhs, x, ilsum, ScalePermstruct, Glu_persist, grid, SOLVEstruct); #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. X to B redistribute time\t%8.4f\n", t); t = SuperLU_timer_(); #endif double tmp1=0; double tmp2=0; double tmp3=0; double tmp4=0; for(i=0;i<num_thread;i++){ tmp1 = SUPERLU_MAX(tmp1,stat_loc[i]->utime[SOL_TRSM]); tmp2 = SUPERLU_MAX(tmp2,stat_loc[i]->utime[SOL_GEMM]); tmp3 = SUPERLU_MAX(tmp3,stat_loc[i]->utime[SOL_COMM]); tmp4 += stat_loc[i]->ops[SOLVE]; #if ( PRNTlevel>=2 ) if(iam==0)printf("thread %5d gemm %9.5f\n",i,stat_loc[i]->utime[SOL_GEMM]); #endif } stat->utime[SOL_TRSM] += tmp1; stat->utime[SOL_GEMM] += tmp2; stat->utime[SOL_COMM] += tmp3; stat->ops[SOLVE]+= tmp4; /* Deallocate storage. 
*/ for(i=0;i<num_thread;i++){ PStatFree(stat_loc[i]); SUPERLU_FREE(stat_loc[i]); } SUPERLU_FREE(stat_loc); SUPERLU_FREE(rtemp); SUPERLU_FREE(lsum); SUPERLU_FREE(x); SUPERLU_FREE(bmod); SUPERLU_FREE(brecv); SUPERLU_FREE(root_send); SUPERLU_FREE(rootsups); SUPERLU_FREE(recvbuf_BC_fwd); log_memory(-nlb*aln_i*iword-nlb*iword - nsupers_i*iword - (CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*iword - maxrecvsz*(nbrecvx+1)*dword - sizelsum*num_thread * dword - (ldalsum * nrhs + nlb * XK_H) *dword - (sizertemp*num_thread + 1)*dword, stat); //account for bmod, brecv, root_send, rootsups, recvbuf_BC_fwd,rtemp,lsum,x for (lk=0;lk<nsupers_j;++lk){ if(UBtree_ptr[lk]!=NULL){ // if(BcTree_IsRoot(LBtree_ptr[lk],'d')==YES){ BcTree_waitSendRequest(UBtree_ptr[lk],'d'); // } // deallocate requests here } } for (lk=0;lk<nsupers_i;++lk){ if(URtree_ptr[lk]!=NULL){ RdTree_waitSendRequest(URtree_ptr[lk],'d'); // deallocate requests here } } MPI_Barrier( grid->comm ); #if ( PROFlevel>=2 ) { float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum; MPI_Reduce (&msg_cnt, &msg_cnt_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_cnt, &msg_cnt_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); if (!iam) { printf ("\tPDGSTRS comm stat:" "\tAvg\tMax\t\tAvg\tMax\n" "\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n", msg_cnt_sum / Pr / Pc, msg_cnt_max, msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6); } } #endif stat->utime[SOLVE] = SuperLU_timer_() - t1_sol; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Exit pdgstrs()"); #endif #if ( PRNTlevel>=2 ) float for_lu, total, max, avg, temp; superlu_dist_mem_usage_t num_mem_usage; dQuerySpace_dist(n, LUstruct, grid, stat, &num_mem_usage); temp = num_mem_usage.total; MPI_Reduce( &temp, &max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm ); MPI_Reduce( &temp, &avg, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm ); if 
(!iam) { printf("\n** Memory Usage **********************************\n"); printf("** Total highmark (MB):\n" " Sum-of-all : %8.2f | Avg : %8.2f | Max : %8.2f\n", avg * 1e-6, avg / grid->nprow / grid->npcol * 1e-6, max * 1e-6); printf("**************************************************\n"); fflush(stdout); } #endif return; } /* PDGSTRS */
gemm_x_coo_row.c
/*
 * gemm_x_coo_row: multi-RHS sparse-dense product for a COO-format sparse
 * matrix with row-major dense operands:  Y = alpha * A * X + beta * Y.
 *
 * NOTE(review): ALPHA_Number / alpha_mul / alpha_madde / index2 are project
 * macros. Presumed semantics (TODO confirm against alphasparse/util.h):
 *   alpha_mul(dst, a, b)   -> dst  = a * b
 *   alpha_madde(dst, a, b) -> dst += a * b
 *   index2(r, c, ld)       -> r * ld + c  (row-major linear index)
 */
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Accumulate alpha * A(nn) * X into Y for the COO entries in [lrs, lre).
 *
 * Thread partitioning: each caller thread only processes entries whose row
 * index satisfies (row % num_threads == tid), so no two threads ever update
 * the same output row concurrently and no atomics are needed.
 *
 * NOTE(review): the `beta` parameter is not used in this function; the
 * beta scaling of Y is done once by the caller (mm_coo_omp) before the
 * accumulation phase.
 */
static void mm_coo_plain_outcols(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy, ALPHA_INT lrs, ALPHA_INT lre)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
    ALPHA_INT tid = alpha_get_thread_id();
    for (ALPHA_INT nn = lrs; nn < lre; ++nn)
    {
        ALPHA_INT cr = mat->row_indx[nn];
        /* Skip entries owned by other threads (static row-modulo split). */
        if (cr % num_threads != tid)
            continue;
        /* Y row and scaled matrix value for this nonzero. */
        ALPHA_Number *Y = &y[index2(cr, 0, ldy)];
        ALPHA_Number val;
        alpha_mul(val, alpha, mat->values[nn]);
        /* X row selected by the nonzero's column index. */
        const ALPHA_Number *X = &x[index2(mat->col_indx[nn], 0, ldx)];
        ALPHA_INT c = 0;
        for (; c < columns; c++)
        {
            /* Y[c] += val * X[c] across all right-hand-side columns. */
            alpha_madde(Y[c], val, X[c]);
        }
    }
}

/*
 * Full product: first scale Y by beta (parallel over rows), then run the
 * COO accumulation phase over all nnz entries with every thread scanning
 * the whole entry list and keeping only its own rows.
 *
 * NOTE(review): the beta-scaling loops use `int i`/`int j` counters while
 * mat->rows/columns are ALPHA_INT — a potential narrowing issue if
 * ALPHA_INT is wider than int; TODO confirm ALPHA_INT's definition.
 */
static alphasparse_status_t mm_coo_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* Phase 1: Y = beta * Y, one row per parallel-for iteration. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (int i = 0; i < mat->rows; i++)
        for (int j = 0; j < columns; j++)
            alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta);
    /* Phase 2: Y += alpha * A * X; each thread scans [0, nnz) and filters
     * by row ownership inside mm_coo_plain_outcols. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        mm_coo_plain_outcols(alpha, mat, x, columns, ldx, beta, y, ldy, 0, mat->nnz);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name generated via the ONAME macro): thin wrapper
 * around the OpenMP implementation. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    return mm_coo_omp(alpha, mat, x, columns, ldx, beta, y, ldy);
}
perftest.c
/**
 * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
 * Copyright (C) The University of Tennessee and The University
 * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
 * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
 * Copyright (C) ARM Ltd. 2017-2021. ALL RIGHTS RESERVED.
 *
 * See file LICENSE for terms.
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include "api/libperf.h"
#include "lib/libperf_int.h"

#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if defined (HAVE_MPI)
#  include <mpi.h>
#elif defined (HAVE_RTE)
#  include<rte.h>
#endif

/* Limits for batch-file tests and CPU-affinity lists. */
#define MAX_BATCH_FILES        32
#define MAX_CPUS               1024
#define TL_RESOURCE_NAME_NONE  "<none>"
/* getopt option string shared between the command line and batch files. */
#define TEST_PARAMS_ARGS       "t:n:s:W:O:w:D:i:H:oSCIqM:r:E:T:d:x:A:BUem:R:"
#define TEST_ID_UNDEFINED      -1

/* Output/behavior flags stored in perftest_context.flags. */
enum {
    TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),
    TEST_FLAG_PRINT_TEST    = UCS_BIT(1),
    TEST_FLAG_SET_AFFINITY  = UCS_BIT(8),
    TEST_FLAG_NUMERIC_FMT   = UCS_BIT(9),
    TEST_FLAG_PRINT_FINAL   = UCS_BIT(10),
    TEST_FLAG_PRINT_CSV     = UCS_BIT(11)
};

/* Socket-based runtime group: two peers (server/client) over one TCP fd. */
typedef struct sock_rte_group {
    int          is_server; /* nonzero on the listening side */
    int          connfd;    /* connected socket used for data exchange */
} sock_rte_group_t;

/* One entry of the tests[] table below. */
typedef struct test_type {
    const char           *name;         /* name used with the -t option */
    ucx_perf_api_t       api;           /* UCT or UCP */
    ucx_perf_cmd_t       command;
    ucx_perf_test_type_t test_type;     /* ping-pong or unidirectional stream */
    const char           *desc;         /* one-line help description */
    const char           *overhead_lat; /* column label: "latency" or "overhead" */
    unsigned             window_size;   /* default max outstanding operations */
} test_type_t;

/* libperf parameters plus the index of the selected tests[] entry. */
typedef struct perftest_params {
    ucx_perf_params_t    super;
    int                  test_id;       /* index into tests[], or TEST_ID_UNDEFINED */
} perftest_params_t;

/* Aggregated state for one perftest run (options, RTE, batch files). */
struct perftest_context {
    perftest_params_t    params;
    const char           *server_addr;  /* NULL when running as the server */
    int                  port;
    int                  mpi;           /* nonzero when MPI mode is active */
    unsigned             num_cpus;
    unsigned             cpus[MAX_CPUS];
    unsigned             flags;
    unsigned             num_batch_files;
    char                 *batch_files[MAX_BATCH_FILES];
    char                 *test_names[MAX_BATCH_FILES];
    sock_rte_group_t     sock_rte_group;
};

/* Table of all supported tests; terminated by a NULL name. */
test_type_t tests[] = {
    {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "active message latency", "latency", 1},

    {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency", "latency", 1},

    {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
     "atomic add latency", "latency", 1},

    {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate", "latency", 1},

    {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / rate", "latency", 1},

    {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / rate", "latency", 1},

    {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / rate", "latency", 1},

    {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "active message bandwidth / message rate", "overhead", 1},

    {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth / message rate", "overhead", 1},

    {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add message rate", "overhead", 1},

    {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag match latency", "latency", 1},

    {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag match bandwidth", "overhead", 32},

    {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag sync match latency", "latency", 1},

    {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag sync match bandwidth", "overhead", 32},

    {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency", "latency", 1},

    {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth", "overhead", 32},

    {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate", "latency", 1},

    {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add bandwidth / message rate", "overhead", 1},

    {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / bandwidth / rate", "latency", 1},

    {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / bandwidth / rate", "latency", 1},

    {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / bandwidth / rate", "latency", 1},

    {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "stream bandwidth", "overhead", 1},

    {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
     "stream latency", "latency", 1},

    {"ucp_am_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "am latency", "latency", 1},

    {"ucp_am_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "am bandwidth / message rate", "overhead", 32},

    {NULL}
};

/*
 * Move exactly `size` bytes through `sock` using `sock_call` (send or recv),
 * polling the fd in 1ms slices so the optional `progress` callback can be
 * driven between I/O attempts.  Returns 0 on success, -1 on socket or poll
 * error (EINTR from poll is retried).
 */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            /* Partial transfers are expected; advance by what was moved. */
            ret = sock_call(sock, (char*)data + total, size - total, 0);
            if (ret < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            }
            total += ret;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}

/* Blocking send of `size` bytes; the cast adapts send()'s const pointer to
 * sock_io's generic signature. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    typedef ssize_t (*sock_call)(int, void *, size_t, int);

    return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send");
}

/* Blocking receive of exactly `size` bytes (see sock_io). */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}

/*
 * Print one intermediate or final results line in CSV, numeric (with
 * thousands separators), or plain format depending on `flags`.
 * Intermediate lines are suppressed when only final output was requested.
 *
 * NOTE(review): `is_server` is currently unused in this function body.
 */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final, int is_server, int is_multi_thread)
{
    /* NOTE(review): these are only ever assigned before use on each call;
     * `static` storage is not functionally required here. */
    static const char *fmt_csv;
    static const char *fmt_numeric;
    static const char *fmt_plain;
    unsigned i;

    if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
        (!final && (flags & TEST_FLAG_PRINT_FINAL)))
    {
        return;
    }

    if (flags & TEST_FLAG_PRINT_CSV) {
        for (i = 0; i < num_names; ++i) {
            printf("%s,", test_names[i]);
        }
    }

#if _OPENMP
    if (!final) {
        printf("[thread %d]", omp_get_thread_num());
    } else if (flags & TEST_FLAG_PRINT_RESULTS) {
        printf("Final: ");
    }
#endif

    if (is_multi_thread && final) {
        /* Aggregated totals only (per-thread moments are meaningless here). */
        fmt_csv     = "%4.0f,%.3f,%.2f,%.0f\n";
        fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n";
        fmt_plain   = "%18.0f %29.3f %22.2f %23.0f\n";

        printf((flags & TEST_FLAG_PRINT_CSV)   ? fmt_csv :
               (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain,
               (double)result->iters,
               result->latency.total_average * 1000000.0,
               result->bandwidth.total_average / (1024.0 * 1024.0),
               result->msgrate.total_average);
    } else {
        /* Full row: percentile / moving / overall latency, bandwidth, rate. */
        fmt_csv     = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
        fmt_numeric = "%'18.0f %10.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n";
        fmt_plain   = "%18.0f %10.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n";

        printf((flags & TEST_FLAG_PRINT_CSV)   ? fmt_csv :
               (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain,
               (double)result->iters,
               result->latency.percentile * 1000000.0,
               result->latency.moment_average * 1000000.0,
               result->latency.total_average * 1000000.0,
               result->bandwidth.moment_average / (1024.0 * 1024.0),
               result->bandwidth.total_average / (1024.0 * 1024.0),
               result->msgrate.moment_average,
               result->msgrate.total_average);
    }

    fflush(stdout);
}

/*
 * Print the test-description banner (API, layout, memory types, sizes) and
 * the column-header block for the chosen output format (CSV vs table).
 */
static void print_header(struct perftest_context *ctx)
{
    const char *overhead_lat_str;
    const char *test_data_str;
    const char *test_api_str;
    test_type_t *test;
    unsigned i;

    test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? NULL :
           &tests[ctx->params.test_id];

    if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) {
        if (test->api == UCX_PERF_API_UCT) {
            test_api_str = "transport layer";
            switch (ctx->params.super.uct.data_layout) {
            case UCT_PERF_DATA_LAYOUT_SHORT:
                test_data_str = "short";
                break;
            case UCT_PERF_DATA_LAYOUT_SHORT_IOV:
                test_data_str = "short iov";
                break;
            case UCT_PERF_DATA_LAYOUT_BCOPY:
                test_data_str = "bcopy";
                break;
            case UCT_PERF_DATA_LAYOUT_ZCOPY:
                test_data_str = "zcopy";
                break;
            default:
                test_data_str = "(undefined)";
                break;
            }
        } else if (test->api == UCX_PERF_API_UCP) {
            test_api_str  = "protocol layer";
            test_data_str = "(automatic)"; /* TODO contig/stride/stream */
        } else {
            return;
        }

        printf("+------------------------------------------------------------------------------------------+\n");
        printf("| API: %-60s |\n", test_api_str);
        printf("| Test: %-60s |\n", test->desc);
        printf("| Data layout: %-60s |\n", test_data_str);
        printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]);
        printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]);
        printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super));
        /* AM header size applies only to UCP active-message tests. */
        if ((test->api == UCX_PERF_API_UCP) && (test->command == UCX_PERF_CMD_AM)) {
            printf("| AM header size: %-60zu |\n", ctx->params.super.ucp.am_hdr_size);
        }
    }

    if (ctx->flags & TEST_FLAG_PRINT_CSV) {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            for (i = 0; i < ctx->num_batch_files; ++i) {
                printf("%s,", ucs_basename(ctx->batch_files[i]));
            }
            printf("iterations,%.1f_percentile_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n",
                   ctx->params.super.percentile_rank);
        }
    } else {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            /* Column label comes from the selected test ("latency" vs "overhead"). */
            overhead_lat_str = (test == NULL) ? "overhead" : test->overhead_lat;

            printf("+--------------+--------------+------------------------------+---------------------+-----------------------+\n");
            printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str);
            printf("+--------------+--------------+----------+---------+---------+----------+----------+-----------+-----------+\n");
            printf("| Stage | # iterations | %4.1f%%ile | average | overall | average | overall | average | overall |\n",
                   ctx->params.super.percentile_rank);
            printf("+--------------+--------------+----------+---------+---------+----------+----------+-----------+-----------+\n");
        } else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
            printf("+------------------------------------------------------------------------------------------+\n");
        }
    }
}

/*
 * In table (non-CSV) batch mode, print a separator row with the batch test
 * names spliced into it ("name1/name2/...").
 *
 * NOTE(review): the memcpy length is clamped to the buffer, but `pos`
 * advances by the full strlen each iteration; with enough long names
 * `sizeof(buf) - pos - 1` can wrap (unsigned) — TODO confirm upstream
 * bounds on test-name lengths.
 */
static void print_test_name(struct perftest_context *ctx)
{
    char buf[200];
    unsigned i, pos;

    if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
        strcpy(buf, "+--------------+--------------+----------+---------+---------+----------+----------+-----------+-----------+");

        pos = 1;
        for (i = 0; i < ctx->num_batch_files; ++i) {
            if (i != 0) {
                buf[pos++] = '/';
            }
            memcpy(&buf[pos], ctx->test_names[i],
                   ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1));
            pos += strlen(ctx->test_names[i]);
        }

        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("%s\n", buf);
        }
    }
}

/* List, for the help text, every memory type that has a registered
 * allocator in this build (host, and any accelerator types compiled in). */
static void print_memory_type_usage(void)
{
    ucs_memory_type_t it;
    for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) {
        if (ucx_perf_mem_type_allocators[it] != NULL) {
            printf(" %s - %s\n", ucs_memory_type_names[it],
                   ucs_memory_type_descs[it]);
        }
    }
}
static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%"PRIu64")\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%"PRIu64")\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the rest of\n"); printf(" the line is command-line 
arguments for the test.\n"); printf(" -R <rank> percentile rank of the percentile data in latency tests (%.1f)\n", ctx->params.super.percentile_rank); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted output\n"); printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" shortiov - short io-vector messages (only for active messages)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu), included in message size\n", ctx->params.super.uct.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag 
tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf(" -I create context with wakeup feature enabled\n"); printf(" -e create endpoints with error handling support\n"); printf(" -E <mode> wait mode for tests\n"); printf(" poll : repeatedly call worker_progress\n"); printf(" sleep : go to sleep after posting requests\n"); printf(" -H <size> active message header size (%zu), not included in message size\n", ctx->params.super.ucp.am_hdr_size); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(opt_arg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_mem_type(const char *opt_arg, ucs_memory_type_t *mem_type) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if(!strcmp(opt_arg, ucs_memory_type_names[it]) && (ucx_perf_mem_type_allocators[it] != NULL)) { *mem_type = it; return UCS_OK; } } ucs_error("Unsupported memory type: \"%s\"", opt_arg); return UCS_ERR_INVALID_PARAM; } static ucs_status_t parse_mem_type_params(const char *opt_arg, ucs_memory_type_t *send_mem_type, ucs_memory_type_t *recv_mem_type) { const char *delim = ","; char *token = strtok((char*)opt_arg, delim); if (UCS_OK != parse_mem_type(token, send_mem_type)) { return 
UCS_ERR_INVALID_PARAM; } token = strtok(NULL, delim); if (NULL == token) { *recv_mem_type = *send_mem_type; return UCS_OK; } else { return parse_mem_type(token, recv_mem_type); } } static ucs_status_t parse_message_sizes_params(const char *opt_arg, ucx_perf_params_t *params) { const char delim = ','; size_t *msg_size_list, token_num, token_it; char *optarg_ptr, *optarg_ptr2; optarg_ptr = (char *)opt_arg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; msg_size_list = realloc(params->msg_size_list, sizeof(*params->msg_size_list) * token_num); if (NULL == msg_size_list) { return UCS_ERR_NO_MEMORY; } params->msg_size_list = msg_size_list; optarg_ptr = (char *)opt_arg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; 
params->super.report_interval = 1.0; params->super.percentile_rank = 50.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.uct.am_hdr_size = 8; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.am_hdr_size = 0; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t *params, char opt, const char *opt_arg) { char *optarg2 = NULL; test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "shortiov")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT_IOV; } else if 
(!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'E': if (!strcmp(opt_arg, "poll")) { params->super.wait_mode = UCX_PERF_WAIT_MODE_POLL; return UCS_OK; } else if (!strcmp(opt_arg, "sleep")) { params->super.wait_mode = UCX_PERF_WAIT_MODE_SLEEP; return UCS_OK; } else { ucs_error("Invalid option argument for -E"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.uct.am_hdr_size = atol(opt_arg); params->super.ucp.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'I': params->super.flags |= UCX_PERF_TEST_FLAG_WAKEUP; return UCS_OK; case 'e': params->super.flags |= UCX_PERF_TEST_FLAG_ERR_HANDLING; return UCS_OK; case 'M': if (!strcmp(opt_arg, 
"single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'R': params->super.percentile_rank = atof(opt_arg); if ((0.0 <= params->super.percentile_rank) && (params->super.percentile_rank <= 100.0)) { return UCS_OK; } else { ucs_error("Invalid option argument for -R"); return UCS_ERR_INVALID_PARAM; } case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", error_prefix); return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { params->super.max_outstanding = 
test->window_size; } return UCS_OK; } static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name, int *line_num, perftest_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; char error_prefix[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } ++(*line_num); argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); ucs_snprintf_safe(error_prefix, sizeof(error_prefix), "in batch file '%s' line %d: ", file_name, *line_num); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("%s-%c %s: %s", error_prefix, c, optarg, ucs_status_string(status)); return status; } } status = adjust_test_params(params, error_prefix); if (status != UCS_OK) { return status; } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx) { char *endptr, *cpu_list = opt_arg; int cpu; ctx->num_cpus = 0; cpu = strtol(cpu_list, &endptr, 10); while (((*endptr == ',') || (*endptr == '\0')) && (ctx->num_cpus < MAX_CPUS)) { if (cpu < 0) { ucs_error("invalid cpu number detected: (%d)", cpu); return UCS_ERR_INVALID_PARAM; } ctx->cpus[ctx->num_cpus++] = cpu; if (*endptr == '\0') { break; } cpu_list = endptr + 1; /* skip the comma */ cpu = strtol(cpu_list, &endptr, 10); } if (*endptr == ',') { ucs_error("number of listed cpus exceeds the maximum supported value (%d)", MAX_CPUS); return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); ucx_perf_global_init(); /* initialize memory 
types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if (status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 
0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int 
sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { 
ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, 
int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; 
r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; ucs_trace_func(""); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ucs_trace_func(""); ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } 
ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? 
UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t check_params(const perftest_params_t *params) { switch (params->super.api) { case UCX_PERF_API_UCT: if (!strcmp(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_error("A device must be specified with -d flag for UCT test"); return UCS_ERR_INVALID_PARAM; } if (!strcmp(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_error( "A transport must be specified with -x flag for UCT test"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case UCX_PERF_API_UCP: if (strcmp(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help " "message", params->super.uct.dev_name); } if (strcmp(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help " "message", params->super.uct.tl_name); } return UCS_OK; default: ucs_error("Invalid test case"); return UCS_ERR_INVALID_PARAM; } } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (depth >= ctx->num_batch_files) { print_test_name(ctx); status = check_params(parent_params); if (status != UCS_OK) { return status; } return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 
1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? "command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). */ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 
0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
/* ===== File: EmbeddingBag.h ===== */
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.) ******************************************************************************/ #define JIT_REDUCE_COLS_IDX #ifdef JIT_REDUCE_COLS_IDX #include <libxsmm.h> #endif #include "utils.h" #include "rtm.h" template <typename T> class EmbeddingBagImpl { public: EmbeddingBagImpl(int M, int E) : M(M), E(E) { weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment); } ~EmbeddingBagImpl() { my_free(weight_); weight_ = 0; } void init(T low = -0.1, T high = 0.1) { init_random(M * E, weight_, low, high); } #ifdef JIT_REDUCE_COLS_IDX void forward(int N, int NS, const long *offsets, const long *indices, T *output_) { T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict output)[E] = (T(*)[*])output_; libxsmm_meltwfunction_reduce_cols_idx kernel; int _ld = E; kernel = libxsmm_dispatch_meltw_reduce_cols_idx(E, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32) ; #pragma omp parallel for for (int n = 0; n < N; n++) { libxsmm_meltw_reduce_cols_idx_param params; auto start = offsets[n]; auto end = (n < N - 1 ? 
offsets[n + 1] : NS); params.n = end - start; params.ind_ptr = &indices[start]; params.inp_ptr = weight; params.out_ptr = &output[n][0]; kernel( &params ); } } #else void forward(int N, int NS, const long *offsets, const long *indices, T *output_) { T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict output)[E] = (T(*)[*])output_; #pragma omp parallel for for (int n = 0; n < N; n++) { auto start = offsets[n]; auto end = (n < N - 1 ? offsets[n + 1] : NS); #pragma omp simd for (long v = 0; v < E; v++) output[n][v] = 0; for (long s = start; s < end; s++) { auto ind = indices[s]; #pragma omp simd for (long v = 0; v < E; v++) { output[n][v] += weight[ind][v]; } } } } #endif void backward(int N, int NS, const T *gradout_, const long *offsets, const long *indices, T *values_) { T(*__restrict gradout)[E] = (T(*)[*])gradout_; T(*__restrict values)[E] = (T(*)[*])values_; #pragma omp parallel for for (int n = 0; n < N; n++) { auto start = offsets[n]; auto end = (n < N - 1 ? offsets[n + 1] : NS); for (long s = start; s < end; s++) { #pragma omp simd #ifdef STREAMING_WRITES #pragma vector nontemporal(values) #endif for (long v = 0; v < E; v++) values[s][v] = gradout[n][v]; } } } void update(int NS, const T *grads_, const long *indices, float lr) { T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict grads)[E] = (T(*)[*])grads_; SimpleSpinLock fallBackLock; #pragma omp parallel for for (long i = 0; i < NS; i++) { long ind = indices[i]; { TransactionScope guard(fallBackLock, 100, 0); #pragma omp simd for (long v = 0; v < E; v++) weight[ind][v] += lr * grads[i][v]; } } } T *weight_; int M; int E; }; typedef EmbeddingBagImpl<FTyp> EmbeddingBag;
/* ===== File: GB_binop__ge_fp32.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32) // A*D function (colscale): GB (_AxD__ge_fp32) // D*A function (rowscale): GB (_DxB__ge_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32) // C=scalar+B GB (_bind1st__ge_fp32) // C=scalar+B' GB (_bind1st_tran__ge_fp32) // C=A+scalar GB (_bind2nd__ge_fp32) // C=A'+scalar GB (_bind2nd_tran__ge_fp32) // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax 
[pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t 
*restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz 
; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__isgt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isgt_uint16
// A.*B function (eWiseMult):       GB_AemultB__isgt_uint16
// A*D function (colscale):         GB_AxD__isgt_uint16
// D*A function (rowscale):         GB_DxB__isgt_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__isgt_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__isgt_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isgt_uint16
// C=scalar+B                       GB_bind1st__isgt_uint16
// C=scalar+B'                      GB_bind1st_tran__isgt_uint16
// C=A+scalar                       GB_bind2nd__isgt_uint16
// C=A'+scalar                      GB_bind2nd_tran__isgt_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x > y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isgt_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first;
    // harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isgt_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isgt_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (x > aij) ; \
}

GrB_Info GB_bind1st_tran__isgt_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (aij > y) ; \
}

GrB_Info GB_bind2nd_tran__isgt_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
example2.c
// calculation example for electromagnetic field intensity distribution #include "multi_fbeam.h" int main() { Bobj bm; FILE *fp1,*fp2; double complex e[3],h[3]; double x[3],rang,dr,*ie,*ih; int max,i,j; init_mfb(&bm); read_data_mfb(&bm); print_data_mfb(&bm); setup_mfb(&bm); max=200; rang=4.0*bm.lambda_0; dr=rang*2/(double)(max-1); ie=(double *)m_alloc2(max,sizeof(double),"example2.c,ie"); ih=(double *)m_alloc2(max,sizeof(double),"example2.c,ih"); // x=0 plane if((fp1=fopen("Ie_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# y z electric_field_intensity"); if((fp2=fopen("Ih_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# y z magnetic_field_intensity"); x[0]=0.0; for(i=0;i<max;i++){ x[1]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[1],x[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[1],x[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // y=0 plane if((fp1=fopen("Ie_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x z electric_field_intensity"); if((fp2=fopen("Ih_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x z magnetic_field_intensity"); x[1]=0.0; for(i=0;i<max;i++){ x[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); }// end 
parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[0],x[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[0],x[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // z=0 plane if((fp1=fopen("Ie_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x y electric_field_intensity"); if((fp2=fopen("Ih_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x y electric_field_intensity"); x[2]=0.0; for(i=0;i<max;i++){ x[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[1]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ x[1]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[0],x[1],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[0],x[1],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); printf("intensity plot is finished\n"); free(ie); free(ih); free_mfb(&bm); return 0; }
openmp.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <OpenCL/opencl.h> // #include <omp.h> //Student ID: 21804416 //Name: SUN QIANG //Where use openmp speed up: //Here we have four tasks using openmp to speed up //1. use openmp to sort all columns //2. use openmp to generate blocks //3. The key point here, use openmp to sort all the signtures(29333879), the openmp speed up is very useful here // At first, I can not sort 29333879 signtures with my computer using only one thread, always core down // Then, when I use openmp here, then problem solved. //4. Divide sorted signatures with different parts, and find collisions with openmp //Where still need speed up //There are still some problems we can do to speed up: //The last column(500), generated 29321366 signtures, and the left 499 colunms generated 12510 signtures // So when we find blocks, most time spent on find blocks in 500 columns with one thread, the other threads are idle. // However, the time spent to find the blocks in 500 colunm is not very long; // And we need to keep this program working for any M*N matrix, so I have not done any improvement here. // we can change the N & M to make this program suitable for some other N*M matrix // we can change the dia to other value // Change the Max value if the number of blocks is larger than that #define N 500 #define M 4400 #define dia 0.000001 #define Max 30000000 // --------- // The data array to store the data information // The key array to store key information // When we want to sort values in each column in data, we only need to sort the seq array to store the sequences // Location of data value, not directly modify the data array. // Signs array to store the signtures for blocks // Location array to store the row ids and column for corresponding signture, for example 434044402220999499 means // The block combined by 434,444,222,999 rows from 499 column. 
// Indexs to store how many blocks generated // Id is the same function as the seq array, to store the location information of the signture //---------- //readData() and readkey() to read values from txt, generate two arrays // Generate() is to generate the seq array // sort() is to sort each column in data // SelectSort() Bubblesort() are used to sort the values (we can use some other methods) // Findblock() and findblockcol() are used to generate columns //Sortsign() and BubbleSortsign() are single thread ways to sort the signtures. // quicksort() is the method which use multiple threads with openmp to sort the signtures based on quicksort. // resulttxt() is used to print the outcome with two colunm (signture,id) static double data[N][M]; static long key[M]; static int seq[N][M]; static long signs[Max]; static long location[Max]; static int indexs; static int id[Max]={-1}; int k=0; void readData(); void readkey(); void generate(); void sort(); void SelectSort(int a[],int n,double b[]); void BubbleSort(int a[],int n,double b[]); void findblock(); void findblockcol(int i,double x[]); void Sortsign(int a[],int n, long b[]); void BubbleSortsign(int a[],int n,long b[]); void resulttxt(); void quicksort(long arr[], int low_index, int high_index); int partition(long * a, int p, int r); int main(){ //start to count time //pre-process for the data readData(); readkey(); generate(); // sort each column sort(); indexs=0; // Find all blocks and store them into signs and location findblock(); //print number of blocks found printf("Number of blcoks found %d\n",indexs); int i; for(i=0;i<indexs;i++){ id[i]=i; } //Sort signture with different ways (Sortsign and bubblesort are using single thread, and quicksort is using openmp) // Sortsign(id,indexs,signs); // quicksort for openmp here quicksort(signs,0,indexs-1); //Find collsions and print it to result.txt resulttxt(); } //Print out the results into results.txt with (signtures,id)(id generate by four row ids and the column id) 
void resulttxt(){ int left =0; int indexsnon=0; long where; FILE *fp; fp = fopen("results.txt","a"); int i; int col=0; for(i=0;i<indexs;i++){ if(signs[id[i]]!=signs[id[i+1]] && signs[id[i]]!=signs[id[i-1]]){ indexsnon=indexsnon+1; } else{ fprintf(fp,"%ld %ld\n",signs[id[i]],location[id[i]]); col=col+1; } } fclose(fp); printf("The number of collsions found: %d\n",col); } //Read keys void readkey() { FILE *fp; char *line = NULL; size_t len = 0; ssize_t read; int line_index = 0; int column_index = 0; fp = fopen("keys.txt","r"); if (fp ==NULL) exit(EXIT_FAILURE); while((read = getline(&line,&len,fp))!=-1) { char *pch; pch=strtok(line," "); while (pch!=NULL) { sscanf(pch,"%ld",&key[column_index]); column_index +=1; pch = strtok(NULL," "); } line_index+=1; } fclose(fp); if(line) free(line); } //Read data void readData() { FILE *fp; char *line = NULL; size_t len = 0; ssize_t read; int line_index = 0; int column_index; fp = fopen("data.txt","r"); if (fp ==NULL) exit(EXIT_FAILURE); while((read = getline(&line,&len,fp))!=-1) { char *pch; column_index=0; pch=strtok(line," ,"); while (pch!=NULL) { sscanf(pch,"%lf",&data[column_index][line_index]); column_index +=1; pch = strtok(NULL," ,"); } line_index+=1; } fclose(fp); if(line) free(line); } void generate(){ //Generate an array with N*M, and each a[i] with 0-4399 int i,j; for(i = 0;i<N;i++) { for (j =0;j<M;j++) { seq[i][j]=j; } } } //Sort each column void sort() { int i,j; ////////Here to change #pragma omp parallel for for(i = 0;i < N-1;i++){ // We can use different methods to sort here SelectSort(seq[i],M,data[i]); // BubbleSort(seq[i],M,data[i]); } } //Select sort method to sort column void SelectSort(int a[],int n, double b[]) { int i,j; for(i=0;i<n-1;i++) { int k=i; for(j=i+1;j<n;j++) if(b[a[k]]>b[a[j]]) k=j; if(k!=i) { int temp=a[i]; a[i]=a[k]; a[k]=temp; } } } //Select sort method for signtures void Sortsign(int a[],int n, long b[]) { int i,j; for(i=0;i<n-1;i++) { //printf("%d %ld\n", i,b[i]); int k=i; 
for(j=i+1;j<n;j++) if(b[a[k]]>b[a[j]]) k=j; if(k!=i) { int temp=a[i]; a[i]=a[k]; a[k]=temp; } } } //Bubble sort for signtures void BubbleSortsign(int a[],int n,long b[]) { int i,j; for(i=n-1;i>0;--i){ // printf("%d %ld\n", i,b[i]); for(j=0;j<i;j++){ if(b[a[j]]>b[a[j+1]]) { int temp=a[j]; a[j]=a[j+1]; a[j+1]=temp; } } } printf("ok\n"); } //Find all blocks void findblock(){ ///////Here to change int i,j; //Use the openmp here to find blocks #pragma omp parallel for //Change i<N-1 to calculate the outcome without last column. i<N to calculate the outcome with last column. for(i=0;i<N-1;i++){ findblockcol(i,data[i]); } } void findblockcol(int i,double x[]){ int nums=0; int a,c,d; long b; long sign; long uni; // FILE *fp; // fp=fopen("blocks.txt","a"); #pragma omp parallel for for(a=0;a<M;a++){ b=a+1; while(x[seq[i][b]]-x[seq[i][a]]<dia && b <M && x[seq[i][a]]!=0 && x[seq[i][b]]!=0) { c=b+1; while(x[seq[i][c]]-x[seq[i][a]]<dia && c<M && x[seq[i][c]]!=0){ d = c+1; while(x[seq[i][d]]-x[seq[i][a]]<dia && d<M && x[seq[i][d]]!=0) { indexs=indexs+1; sign=key[a]+key[b]+key[c]+key[d]; signs[indexs-1]=sign; uni=(a*1000000000000+b*100000000+c*10000+d)*1000+i; location[indexs-1]=uni; // fprintf(fp,"%ld %ld \n",sign,uni); d = d+1; nums=nums+1; } c=c+1; } b=b+1; } } // fclose(fp); printf("Number found for %d column: %d\n",i+1,nums); } int partition(long * a, int p, int r) { int lt[r-p]; int gt[r-p]; int i; int j; int key = a[r]; int lt_n = 0; int gt_n = 0; #pragma omp parallel for for(i = p; i < r; i++){ if(a[i] < a[r]){ lt[lt_n++] = a[i]; }else{ gt[gt_n++] = a[i]; } } for(i = 0; i < lt_n; i++){ a[p + i] = lt[i]; } a[p + lt_n] = key; for(j = 0; j < gt_n; j++){ a[p + lt_n + j + 1] = gt[j]; } return p + lt_n; } void quicksort(long * a, int p, int r) { int div; if(p < r){ div = partition(a, p, r); #pragma omp parallel sections { #pragma omp section quicksort(a, p, div - 1); #pragma omp section quicksort(a, div + 1, r); } } }
GB_binop__isle_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint64) // A*D function (colscale): GB (_AxD__isle_uint64) // D*A function (rowscale): GB (_DxB__isle_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint64) // C=scalar+B GB (_bind1st__isle_uint64) // C=scalar+B' GB (_bind1st_tran__isle_uint64) // C=A+scalar GB (_bind2nd__isle_uint64) // C=A'+scalar GB (_bind2nd_tran__isle_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT64 || GxB_NO_ISLE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t 
*restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_uint64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
XT_OffsetError.c
/* ============================================================================ * Copyright (c) 2015 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <stdio.h> #include "nrutil.h" #include "XT_Constants.h" #include "XT_Structures.h" #include <mpi.h> #include <math.h> #include "XT_IOMisc.h" #include "invert.h" #include "allocate.h" void gen_offset_constraint_windows (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr) { int32_t r_size, t_size, num = 0, i, j, k, l, dim[4], N_t_node, N_r, N_t, node_rank, node_num, node_idx, k_idx, l_idx; char constraint_file[100] = "proj_constraint"; node_rank = TomoInputsPtr->node_rank; node_num = TomoInputsPtr->node_num; N_r = SinogramPtr->N_r; N_t_node = SinogramPtr->N_t; N_t = N_t_node*node_num; r_size = 2*N_r/((int32_t)(sqrt(N_r) + 0.5)); t_size = 2*N_t/((int32_t)(sqrt(N_t) + 0.5)); for (i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) num++; SinogramPtr->off_constraint = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, num, N_r, N_t_node); memset(&(SinogramPtr->off_constraint[0][0][0]), 0, num*N_r*N_t_node*sizeof(Real_arr_t)); for (num = 0, i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) { for (k = i; k < i + r_size; k++) for (l = j; l < j + t_size; l++) { node_idx = node_rank*N_t_node; k_idx = k % N_r; l_idx = l % N_t; if (l_idx >= node_idx && l_idx < node_idx + N_t_node) { SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] = (k-i) < r_size/2 ? (k-i+1): r_size-(k-i); SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] *= (l-j) < t_size/2 ? 
(l-j+1): t_size-(l-j); } } num++; } SinogramPtr->off_constraint_num = num; dim[0] = 1; dim[1] = num; dim[2] = SinogramPtr->N_r; dim[3] = SinogramPtr->N_t; sprintf(constraint_file, "%s_n%d", constraint_file, node_rank); if (TomoInputsPtr->Write2Tiff == 1) WriteMultiDimArray2Tiff (constraint_file, dim, 0, 1, 2, 3, &(SinogramPtr->off_constraint[0][0][0]), 0, TomoInputsPtr->debug_file_ptr); fprintf(TomoInputsPtr->debug_file_ptr, "gen_offset_constraint_windows: r_size = %d, t_size = %d, number of constraints = %d\n", r_size, t_size, SinogramPtr->off_constraint_num); /* SinogramPtr->off_constraint_size = SinogramPtr->N_r; SinogramPtr->off_constraint = (Real_t**)multialloc(sizeof(Real_t), 2, 1, SinogramPtr->N_r); for (j = 0; j < SinogramPtr->N_r; j++) SinogramPtr->off_constraint[0][j] = 1; SinogramPtr->off_constraint_num = 1;*/ } void constrained_quad_opt (Real_t** Lambda, Real_t** b, Real_arr_t*** A, Real_arr_t** x, int32_t Nr, int32_t Nt, int32_t M, TomoInputs* TomoInputsPtr) { Real_t **D, **Dinv; Real_t *temp, *temp2; int32_t i, j, k, l; D = (Real_t**)multialloc(sizeof(Real_t), 2, M, M); Dinv = (Real_t**)multialloc(sizeof(Real_t), 2, M, M); temp = (Real_t*)get_spc(M, sizeof(Real_t)); temp2 = (Real_t*)get_spc(M, sizeof(Real_t)); memset(&(D[0][0]), 0, M*M*sizeof(Real_t)); memset(&(Dinv[0][0]), 0, M*M*sizeof(Real_t)); #pragma omp parallel for collapse(2) private(k, l) for (i = 0; i < M; i++) for (j = 0; j < M; j++) for (k = 0; k < Nr; k++) for (l = 0; l < Nt; l++) { D[i][j] += A[i][k][l]*A[j][k][l]/Lambda[k][l]; /*sum += A[i][k]*A[j][k]/Lambda[k];*/ } /* TomoInputsPtr->t0_mpired1 = time(NULL); */ MPI_Allreduce(&(D[0][0]), &(Dinv[0][0]), M*M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ /* printf("Checksum is %f\n", sum);*/ invert2(Dinv, M); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += 
A[i][j][k]*b[j][k]/Lambda[j][k]; } /* TomoInputsPtr->t0_mpired1 = time(NULL);*/ MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ #pragma omp parallel for private(j) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < M; j++) temp[i] += Dinv[i][j]*temp2[j]; } /* TomoInputsPtr->t0_mpired1 = time(NULL); MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ #pragma omp parallel for collapse(2) private(k) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) { x[i][j] = 0; for (k = 0; k < M; k++) x[i][j] += A[k][i][j]*temp[k]; } #pragma omp parallel for collapse(2) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) x[i][j] = (b[i][j] - x[i][j])/Lambda[i][j]; free(temp); free(temp2); multifree(D, 2); multifree(Dinv, 2); } void compute_d_constraint (Real_arr_t*** A, Real_arr_t **d, int32_t Nr, int32_t Nt, int32_t M, FILE* debug_file_ptr) { int32_t i, j, k; Real_t *temp, *val; temp = (Real_t*)get_spc(M, sizeof(Real_t)); val = (Real_t*)get_spc(M, sizeof(Real_t)); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k]*d[j][k]; } MPI_Allreduce(&(temp[0]), &(val[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); for (i = 0; i < M; i++) fprintf(debug_file_ptr, "compute_d_constraint: The i th constraint on offset error is %f\n", val[i]); free(temp); free(val); }
fs_csc_inspector.h
#include <vector>
#include <cassert>
#include <set>

// Adds the directed edge v -> w to the dependence DAG (adjacency-list form).
inline void connect(int v, int w, std::vector<std::vector<int>> &DAG){
 DAG[v].push_back( w );
}

/*
****** Inspector for level-set parallelization of Forward Solve CSC's
outermost loop.
For each column `col` of the lower-triangular factor L (CSC arrays Lp/Li),
records an edge col -> row for every strictly sub-diagonal nonzero
(row > col): solving column `col` updates those rows, so they depend on it.
Each iteration appends only to DAG[col], its own adjacency list, so the
parallel loop is race-free.
*/
void fs_csc_inspector(int n, int* Lp, int* Li, std::vector<std::vector<int>> &DAG){
#pragma omp parallel for schedule(auto)
 for(int col = 0; col < n; col++){
  for(int idx = Lp[col]; idx < Lp[col+1]; idx++){
   if( col < Li[idx] ){
    connect(col, Li[idx], DAG);
   }
  }
 }
}

/*
****** Same inspector, building the DAG with duplicate-free edge sets.
Identical dependence structure to the vector overload; std::set::insert
deduplicates repeated edges.  (The original declared function-scope
In_2/In_4/Out_2 and listed them in a private() clause, but the loop declared
shadowing copies, leaving the outer variables and the clause dead — removed.)
*/
void fs_csc_inspector(int n, int* Lp, int* Li, std::vector<std::set<int>> &DAG){
#pragma omp parallel for schedule(static)
 for(int col = 0; col < n; col++){
  for(int idx = Lp[col]; idx < Lp[col+1]; idx++){
   if( col < Li[idx] ){
    DAG[col].insert( Li[idx] );
   }
  }
 }
}
DRB021-reductionmissing-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A kernel with two level parallelizable loop with reduction:
if reduction(+:sum) is missing, there is race condition.
Data race pairs: we allow multiple pairs to preserve the pattern.
 sum@70:7 vs. sum@70:7
 sum@70:7 vs. sum@70:13
*/
/* NOTE(review): this is a DataRaceBench kernel annotated by the Cetus
 * auto-parallelizer.  The pragma placement and statement order ARE the
 * benchmark pattern — do not restructure this code. */
#include <stdio.h>

int main(int argc, char * argv[])
{
	int i, j;
	float temp, sum = 0.0;
	int len = 100;
	float u[100][100];
	int _ret_val_0;
	/* Initialize the 100x100 matrix to a constant.  The inner
	 * "omp parallel for" nests inside the outer one; whether it creates
	 * extra threads depends on nested-parallelism settings — presumably
	 * intentional for the benchmark, TODO confirm. */
	#pragma cetus private(i, j)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#0#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j ++ )
		{
			u[i][j]=0.5;
		}
	}
	/* Sum of squares of all elements.  Both loop levels carry
	 * reduction(+: sum); removing either reduction clause reintroduces the
	 * data race this benchmark is named for. */
	#pragma cetus private(i, j, temp)
	#pragma loop name main#1
	#pragma cetus reduction(+: sum)
	#pragma cetus parallel
	#pragma omp parallel for private(i, j, temp) reduction(+: sum)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j, temp)
		#pragma loop name main#1#0
		#pragma cetus reduction(+: sum)
		#pragma cetus parallel
		#pragma omp parallel for private(j, temp) reduction(+: sum)
		for (j=0; j<len; j ++ )
		{
			temp=u[i][j];
			sum=(sum+(temp*temp));
		}
	}
	printf("sum = %f\n", sum);
	_ret_val_0=0;
	return _ret_val_0;
}
loops.h
// Cuda/host loops
#pragma once

#include "cutil.h"
#include "debug.h"
#include "preprocessor.h"
#include "print.h"
#include <optional>
#include <type_traits>
#ifndef __APPLE__
#include <omp.h>
#endif
namespace mandelbrot {

using std::is_signed_v;
using std::optional;
using std::tuple;

// Clamp/convert a loop count to int for use as a kernel grid-stride bound.
// For now, assume we fit in int32_t
template<class I> __device__ static inline int grid_stride_loop_size(const I n) {
  static_assert(is_signed_v<I>);
  return int(n);
}

// Define a grid stride loop: i walks 0.._n-1 with stride = total thread count.
#define GRID_STRIDE_LOOP(n, i) \
  for (int _n = grid_stride_loop_size(n), \
           _stride = blockDim.x * gridDim.x, \
           i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < _n; i += _stride)

// Call a one-dimensional grid-stride loop
// (32 blocks per SM, 256 threads per block — presumably tuned; confirm before changing).
#define INVOKE_GRID_STRIDE_LOOP(name, n, ...) CUDA_OR_DIE(({ \
  const int _n = (n);  /* For now, assume we fit in int32_t */ \
  name<<<32*num_sms(), 256>>>(_n, __VA_ARGS__); }))

// Define 1D loop functions on CPU and GPU.  Expands to name##_device (CUDA
// kernel, only when CUDA is enabled), name##_host (serial loop), and a
// dispatcher `name` that picks the device path iff any argument is a device
// type (is_device<...>); device arguments are unwrapped with undevice().
#define DEF_LOOP(name, n, i, args, body) \
  IF_CUDA(template<class S> __global__ static void name##_device(const int n, UNPAREN args) { \
    GRID_STRIDE_LOOP(n, i) { body } \
  }) \
  template<class S> static void name##_host(const int n, UNPAREN args) { \
    for (int i = 0; i < n; i++) { body } \
  } \
  template<class... Args> static inline void name(const int64_t n, Args&&... xs) { \
    if (!n) return; \
    if constexpr ((... || is_device<Args>)) \
      INVOKE_GRID_STRIDE_LOOP(name##_device, n, undevice(xs)...); \
    else \
      name##_host(n, std::forward<Args>(xs)...); \
  }

// Define a serial function on CPU and GPU (not a loop, but meh).
// This is for reducing the number of total kernel invocations in base cases.
// The device version launches a single-thread kernel (<<<1,1>>>) on stream().
#define DEF_SERIAL(name, args, body) \
  IF_CUDA(template<class S> __global__ static void name##_device(UNPAREN args) { body }) \
  template<class S> static void name##_host(UNPAREN args) { body } \
  template<class... Args> static inline void name(Args&&... xs) { \
    if constexpr ((... || is_device<Args>)) \
      CUDA_OR_DIE(name##_device<<<1, 1, 0, stream()>>>(undevice(xs)...)); \
    else \
      name##_host(std::forward<Args>(xs)...); \
  }

// Chop a loop into [start,end) chunks (defined elsewhere; returns this
// thread's half-open slice of [0,steps)).
tuple<int64_t,int64_t> partition_loop(const int64_t steps, const int threads, const int thread);

// Parallel reductions that assume only associativity.
// Formally, if (reduce(y, a), reduce(y, b)) is equivalent to (reduce(a, b), reduce(y, a)), then
// this routine is equivalent to:
//   for (int64_t i = 0; i < n; i++)
//     reduce(y, map(i));
// Each OpenMP thread folds its slice into a private optional<Y>; the partials
// are then folded into y serially after the parallel region.
template<class Y, class R, class M> void map_reduce(Y& y, R&& reduce, M&& map, const int64_t n) {
#if __APPLE__
  // No OpenMP on Apple builds (omp.h not included above): plain serial loop.
  for (int64_t i = 0; i < n; i++)
    reduce(y, map(i));
#else
  vector<optional<Y>> partials;
  #pragma omp parallel
  {
    const int threads = omp_get_num_threads();
    const int thread = omp_get_thread_num();
    const auto [start, end] = partition_loop(n, threads, thread);
    if (start < end) {
      // Lazily size `partials` once the thread count is known; the critical
      // section serializes the resize calls.
      #pragma omp critical
      {
        partials.resize(threads);
      }
      // NOTE(review): this reference is taken outside the critical section.
      // It stays valid only because every later resize(threads) is to the
      // current size and so cannot reallocate — worth confirming/documenting
      // at the call site if `threads` could ever differ between threads.
      auto& p = partials[thread];
      for (int64_t i = start; i < end; i++) {
        auto fx = map(i);
        // First element seeds this thread's partial; the rest fold into it.
        if (i == start) p = move(fx);
        else reduce(*p, fx);
      }
    }
  }
  // Serial final fold; slots for threads that had empty slices stay nullopt.
  for (const auto& t : partials)
    if (t) reduce(y, *t);
#endif
}

}  // namespace mandelbrot
GB_binop__pair_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_int8)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = 1

// NOTE: the PAIR operator ignores both operands (GB_BINOP sets z = 1, and
// GB_GETA/GB_GETB expand to nothing), which is why most kernel variants below
// are generated as "(none)" and compiled out with #if 0.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]  (a no-op here: PAIR never reads A)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]  (a no-op here: PAIR never reads B)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the template block above already returned); generated code
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
bench.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> //------------------------------------------------------------------------------------------------------------------------------ #include <omp.h> #ifdef __MPI #include <mpi.h> #endif //------------------------------------------------------------------------------------------------------------------------------ #include "defines.h" #include "box.h" #include "mg.h" #include "operators.h" //------------------------------------------------------------------------------------------------------------------------------ int main(int argc, char **argv){ int MPI_Rank=0; int MPI_Tasks=1; int OMP_Threads = 1; #pragma omp parallel { #pragma omp master { OMP_Threads = omp_get_num_threads(); } } #ifdef __MPI #warning Compiling for MPI... 
int MPI_threadingModel = -1; //int MPI_threadingModelRequested = MPI_THREAD_SINGLE; //int MPI_threadingModelRequested = MPI_THREAD_SERIALIZED; int MPI_threadingModelRequested = MPI_THREAD_FUNNELED; //int MPI_threadingModelRequested = MPI_THREAD_MULTIPLE; #ifdef __MPI_THREAD_MULTIPLE MPI_threadingModelRequested = MPI_THREAD_MULTIPLE; #endif MPI_Init_thread(&argc, &argv, MPI_threadingModelRequested, &MPI_threadingModel); MPI_Comm_size(MPI_COMM_WORLD, &MPI_Tasks); MPI_Comm_rank(MPI_COMM_WORLD, &MPI_Rank); if(MPI_threadingModel>MPI_threadingModelRequested)MPI_threadingModel=MPI_threadingModelRequested; if(MPI_Rank==0){ if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, "); else if(MPI_threadingModelRequested == MPI_THREAD_SINGLE )printf("Requested MPI_THREAD_SINGLE, "); else if(MPI_threadingModelRequested == MPI_THREAD_FUNNELED )printf("Requested MPI_THREAD_FUNNELED, "); else if(MPI_threadingModelRequested == MPI_THREAD_SERIALIZED)printf("Requested MPI_THREAD_SERIALIZED, "); else if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, "); else printf("Requested Unknown MPI Threading Model (%d), ",MPI_threadingModelRequested); if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n"); else if(MPI_threadingModel == MPI_THREAD_SINGLE )printf("got MPI_THREAD_SINGLE\n"); else if(MPI_threadingModel == MPI_THREAD_FUNNELED )printf("got MPI_THREAD_FUNNELED\n"); else if(MPI_threadingModel == MPI_THREAD_SERIALIZED)printf("got MPI_THREAD_SERIALIZED\n"); else if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n"); else printf("got Unknown MPI Threading Model (%d)\n",MPI_threadingModel); fflush(stdout); } #ifdef __MPI_THREAD_MULTIPLE if( (MPI_threadingModelRequested == MPI_THREAD_MULTIPLE) && (MPI_threadingModel != MPI_THREAD_MULTIPLE) ){MPI_Finalize();exit(0);} #endif #endif int log2_subdomain_dim = 6; int subdomains_per_rank_in_i=256 / 
(1<<log2_subdomain_dim); int subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim); int subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim); int ranks_in_i=1; int ranks_in_j=1; int ranks_in_k=1; if(argc==2){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim); subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim); subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim); }else if(argc==5){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=atoi(argv[2]); subdomains_per_rank_in_j=atoi(argv[3]); subdomains_per_rank_in_k=atoi(argv[4]); }else if(argc==8){ log2_subdomain_dim=atoi(argv[1]); subdomains_per_rank_in_i=atoi(argv[2]); subdomains_per_rank_in_j=atoi(argv[3]); subdomains_per_rank_in_k=atoi(argv[4]); ranks_in_i=atoi(argv[5]); ranks_in_j=atoi(argv[6]); ranks_in_k=atoi(argv[7]); }else if(argc!=1){ if(MPI_Rank==0){printf("usage: ./a.out [log2_subdomain_dim] [subdomains per rank in i,j,k] [ranks in i,j,k]\n");} #ifdef __MPI MPI_Finalize(); #endif exit(0); } /* if(log2_subdomain_dim>7){ if(MPI_Rank==0){printf("error, log2_subdomain_dim(%d)>7\n",log2_subdomain_dim);} #ifdef __MPI MPI_Finalize(); #endif exit(0); } */ if(ranks_in_i*ranks_in_j*ranks_in_k != MPI_Tasks){ if(MPI_Rank==0){printf("error, ranks_in_i*ranks_in_j*ranks_in_k(%d*%d*%d=%d) != MPI_Tasks(%d)\n",ranks_in_i,ranks_in_j,ranks_in_k,ranks_in_i*ranks_in_j*ranks_in_k,MPI_Tasks);} #ifdef __MPI MPI_Finalize(); #endif exit(0); } if(MPI_Rank==0)printf("%d MPI Tasks of %d threads\n",MPI_Tasks,OMP_Threads); int subdomain_dim_i=1<<log2_subdomain_dim; int subdomain_dim_j=1<<log2_subdomain_dim; int subdomain_dim_k=1<<log2_subdomain_dim; // fine dim = 128 64 32 16 8 4 // levels = 6 5 4 3 2 1 int log2_coarse_dim = 2; // i.e. coarsen to 4^3 //int log2_coarse_dim = 1; // i.e. 
coarsen to 2^3 int levels_in_vcycle=1+log2_subdomain_dim-log2_coarse_dim; // ie 1+log2(fine grid size)-log2(bottom grid size) if(MPI_Rank==0){printf("truncating the v-cycle at %d^3 subdomains\n",1<<log2_coarse_dim);fflush(stdout);} //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - int box; domain_type domain_1 ; domain_type domain_CA; int boundary_conditions[3] = {__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC}; // i-, j-, and k-directions //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - create_domain(&domain_1 , subdomain_dim_i,subdomain_dim_j,subdomain_dim_k, subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k, ranks_in_i,ranks_in_j,ranks_in_k, MPI_Rank, boundary_conditions, __NumGrids,1,levels_in_vcycle); create_domain(&domain_CA, subdomain_dim_i,subdomain_dim_j,subdomain_dim_k, subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k, ranks_in_i,ranks_in_j,ranks_in_k, MPI_Rank, boundary_conditions, __NumGrids,4,levels_in_vcycle); double h0=1.0/((double)(domain_1.dim.i)); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if(MPI_Rank==0){printf("initializing alpha, beta, RHS ...");fflush(stdout);} double a=0.9; double b=0.9; initialize_problem(&domain_1 ,0,h0,a,b); initialize_problem(&domain_CA,0,h0,a,b); if(MPI_Rank==0){printf("done\n");fflush(stdout);} //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - MGBuild(&domain_1 ,a,b,h0); // restrictions, dominant eigenvalue, etc... MGBuild(&domain_CA,a,b,h0); // restrictions, dominant eigenvalue, etc... 
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - int s,sMax=2; #ifdef __MPI sMax=4; #endif //Make initial an guess for u(=0)... Solve Lu=f to precision of 1e-10...print the benchmarking timing results... MGResetTimers(&domain_1 );for(s=0;s<sMax;s++){zero_grid(&domain_1 ,0,__u); MGSolve(&domain_1 ,__u,__f,a,b,1e-15);}print_timing(&domain_1 ); MGResetTimers(&domain_CA);for(s=0;s<sMax;s++){zero_grid(&domain_CA,0,__u); MGSolve(&domain_CA,__u,__f,a,b,1e-15);}print_timing(&domain_CA); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - destroy_domain(&domain_1 ); destroy_domain(&domain_CA); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #ifdef __MPI MPI_Finalize(); #endif //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - return(0); }
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> MSFenvAccess; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> 
AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. 
enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. 
Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. 
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. 
bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. 
} PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules. 
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... 
void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? 
nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. 
ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && Tok.getIdentifierInfo() != Ident_Bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  // Annotation tokens carry no identifier info.
  if (Tok.isAnnotation())
    return false;
  // Lazily look up and cache the IdentifierInfo for 'instancetype' on
  // first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  // Tracks whether the parser is inside an ObjC container for the lifetime
  // of this object; restored automatically on destruction.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Null out Self so Exit()/the destructor become no-ops: no scope was
      // entered, so none must be exited.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() { Exit(); }
};

/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  // Number of scopes entered through this object and not yet exited.
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  /// Exit all scopes entered so far, innermost first.
  void Exit() {
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() { Exit(); }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
// Convenience overload: diagnose at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); virtual void ParseLexedPragmas(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; void ParseLexedPragmas() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. 
struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; IdentifierInfo *MacroII = nullptr; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; /// Contains the lexed tokens of a pragma with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. class LateParsedPragma : public LateParsedDeclaration { Parser *Self = nullptr; AccessSpecifier AS = AS_none; CachedTokens Toks; public: explicit LateParsedPragma(Parser *P, AccessSpecifier AS) : Self(P), AS(AS) {} void takeToks(CachedTokens &Cached) { Toks.swap(Cached); } const CachedTokens &toks() const { return Toks; } AccessSpecifier getAccessSpecifier() const { return AS; } void ParseLexedPragmas() override; }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). 
struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely-defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser *Self; /// Method - The method declaration. Decl *Method; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must to be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. 
Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) {} /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. 
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop the class if Pop() was not called explicitly.
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization ? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) {}

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) {}

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; // In ParseCXXInlineMethods.cpp. struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, 
ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords 
recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseSYCLUniqueStableNameExpression(); ExprResult ParseSYCLUniqueStableIdExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>(), bool FailImmediatelyOnInvalidExpr = false, bool EarlyTypoCorrection = false); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. 
Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
// Parse a C++98 dynamic-exception-specification: 'throw' '(' type-id-list ')'.
// Exceptions/Ranges receive one entry per type-id in the list.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
// Parse either a parenthesized expression-list or a type-id, as found in a
// new-expression's placement/initializer position.
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
// UseGlobal is true when the expression was written with a leading '::'
// (::new / ::delete); Start is the location of the first token.
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, bool MissingOK, ForRangeInfo *FRI = nullptr, bool EnterForConditionScope = false); DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context, ParsedAttributesWithRange &Attrs); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); struct DesignatorCompletionInfo { SmallVectorImpl<Expr *> &InitExprs; QualType PreferredBaseType; }; ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult 
ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool 
ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK, bool MissingOK, SourceLocation *LParenLoc, SourceLocation *RParenLoc); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior of this __if_exists or __if_not_exists block /// should. 
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. 
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // location of ':' between declarator and range
  ExprResult RangeExpr;    // the range expression after the ':'

  // A valid ColonLoc means the ':' was seen, i.e. this really is a
  // for-range-declaration.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar; // the parsed loop-variable declaration statement
};

// Declaration-parsing entry points. DeclEnd is set to the location of the
// token ending the declaration (typically the ';').
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          RecordDecl *TagDecl);
// FieldsCallback is invoked once per declarator in the struct-declaration.
void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  // In C++ this requires (potentially expensive) tentative parsing to rule
  // out function-style casts; in C a decl-specifier check suffices.
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // OpenMP needs to know a loop is starting before disambiguation runs.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return Tok.is(tok::kw_using) ||
           isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form type-id or expression. /// It is similar to isTypeIdInParens but does not suppose that type-id /// is in parenthesis. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). 
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. 
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. bool TrySkipAttributes(); /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of /// _BitInt as an extension when appropriate. 
void DiagnoseBitIntUse(const Token &Tok);

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeName,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only a '[[' sequence can begin a C++11 attribute-specifier.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Bail out unless this looks like '[[' or 'alignas'.
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes appertaining to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID,
                             bool DiagnoseEmptyAttrs = false);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                         ParsedAttributes &Attrs, SourceLocation *EndLoc,
                         IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                         ParsedAttr::Syntax Syntax);

// Bitmask selecting which attribute syntaxes ParseAttributes should accept.
enum ParseAttrKindMask {
  PAKM_GNU = 1 << 0,
  PAKM_Declspec = 1 << 1,
  PAKM_CXX11 = 1 << 2,
};

/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
///   [[]] __attribute__(()) int i; // OK
///   __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
// Parse attributes of the syntaxes selected by WhichAttrKinds (a mask of
// ParseAttrKindMask bits), in any interleaving order.
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs,
                     SourceLocation *End = nullptr,
                     LateParsedAttrList *LateAttrs = nullptr);

// Convenience overload for callers that don't track the source range:
// parses into a temporary ParsedAttributesWithRange and moves the results
// into Attrs.
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                     SourceLocation *End = nullptr,
                     LateParsedAttrList *LateAttrs = nullptr) {
  ParsedAttributesWithRange AttrsWithRange(AttrFactory);
  ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
  Attrs.takeAllFrom(AttrsWithRange);
}

/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
/// Returns true if any attribute syntax start token was seen and attributes
/// were parsed; false if the current token cannot begin an attribute.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
                          ParsedAttributesWithRange &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
    ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
    return true;
  }
  return false;
}

// Same as above, for callers that don't track the source range.
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
    ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
    return true;
  }
  return false;
}

// Parse optional GNU __attribute__s and attach them to the declarator D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}

/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(Attrs, EndLoc, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); return true; } return false; } bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParseGNUAttributes(Attrs, EndLoc, LateAttrs); return true; } return false; } /// Parses GNU-style attributes and returns them without source range /// information. /// /// This API is discouraged. Use the version that takes a /// ParsedAttributesWithRange instead. void ParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D); Attrs.takeAllFrom(AttrsWithRange); } void ParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) { // If parsing the attributes found an OpenMP directive, emit those tokens // to the parse stream now. 
if (!OpenMPTokens.empty()) { PP.EnterToken(Tok, /*IsReinject*/ true); PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true, /*IsReinject*/ true); ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true); } } void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) { ParseCXX11Attributes(attrs, endLoc); return true; } return false; } void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName, CachedTokens &OpenMPTokens); void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs, CachedTokens &OpenMPTokens, SourceLocation *EndLoc = nullptr); void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr) { CachedTokens OpenMPTokens; ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc); ReplayOpenMPAttributeTokens(OpenMPTokens); } void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, CachedTokens &OpenMPTokens); IdentifierInfo *TryParseCXX11AttributeIdentifier( SourceLocation &Loc, Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None, const IdentifierInfo *EnclosingScope = nullptr); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) { ParseMicrosoftDeclSpecs(Attrs, End); return true; } return false; } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); 
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; bool 
isClassCompatibleKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributesWithRange &Attrs, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo 
*Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool 
ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse an 'append_args' clause for '#pragma omp declare variant'. 
bool parseOpenMPAppendArgs( SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse 'omp [begin] assume[s]' directive. void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parse 'omp end assumes' directive. void ParseOpenMPEndAssumesDirective(SourceLocation Loc); /// Parse clauses for '#pragma omp [begin] declare target'. void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind, OpenMPDirectiveKind EndDKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. 
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. /// \param ReadDirectiveWithinMetadirective true if directive is within a /// metadirective and therefore ends on the closing paren. StmtResult ParseOpenMPDeclarativeOrExecutableDirective( ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective = false); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. 
/// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses indirect clause /// \param ParseOnly true to skip the clause's semantic actions and return // false; bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. 
/// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); /// Parses clause with an interop variable of kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. // OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. 
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation 
&RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc, TemplateTy NameHint = nullptr); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs, TemplateTy Template, SourceLocation OpenLoc); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); 
ExprResult ParseExpressionTrait();

/// SYCL Type Traits
// __builtin_num_fields, __builtin_num_bases
ExprResult ParseSYCLBuiltinNum();
// __builtin_field_type, __builtin_base_type
ExprResult ParseSYCLBuiltinType();

//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                               unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;

// Bit-set of qualifiers that may follow the 'asm' keyword in a GCC-style
// asm statement ('volatile', 'inline', 'goto').
class GNUAsmQualifiers {
  // Accumulated AQ bits; AQ_unspecified (0) means no qualifiers seen.
  unsigned Qualifiers = AQ_unspecified;

public:
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  static const char *getQualifierName(AQ Qualifier);
  // Sets the given qualifier bit; see implementation for duplicate handling.
  bool setAsmQualifier(AQ Qualifier);
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
  inline bool isInline() const { return Qualifiers & AQ_inline; };
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};

} // end namespace clang

#endif
draw.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                        DDDD   RRRR    AAA   W   W                           %
%                        D   D  R   R  A   A  W   W                           %
%                        D   D  RRRR   AAAAA  W W W                           %
%                        D   D  R  R   A   A  WW WW                           %
%                        DDDD   R   R  A   A  W   W                           %
%                                                                             %
%                                                                             %
%                     MagickCore Image Drawing Methods                        %
%                                                                             %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1998                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
%  rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
%  Graphics Gems, 1990.  Leonard Rosenthal and David Harr of Appligent
%  (www.appligent.com) contributed the dash pattern, linecap stroking
%  algorithm, and minor rendering improvements.
%
*/

/*
  Include declarations.
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2053 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* x/y extent of the edge (x1..x2, y1..y2) */

  double
    scanline;         /* scanline bookkeeping; (-1.0) when freshly built --
                         see ConvertPathToPolygon */

  PointInfo
    *points;          /* monotonic (in y) run of vertices for this edge */

  size_t
    number_points;    /* count of entries in points[] */

  ssize_t
    direction;        /* 1 if points run downward (increasing y), else 0 */

  MagickBooleanType
    ghostline;        /* edge synthesized to close an open subpath; not
                         stroked -- see ConvertPrimitiveToPath */

  size_t
    highwater;        /* initialized to 0 here; used by the rasterizer --
                         TODO confirm semantics against PolygonInfo users */
} EdgeInfo;

typedef struct _ElementInfo
{
  /* Ellipse/arc element: center, axes, rotation -- presumably consumed by
     the ellipse/circle tracing code; confirm against users. */
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

typedef struct _MVGInfo
{
  /* Mutable state threaded through the MVG primitive tracers. */
  PrimitiveInfo
    **primitive_info;  /* growable primitive buffer (indirect so it can be
                          reallocated by callees) */

  size_t
    *extent;           /* current capacity of *primitive_info */

  ssize_t
    offset;            /* next write position in *primitive_info */

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;           /* edges sorted for scanline rendering */

  size_t
    number_edges;
} PolygonInfo;

typedef enum
{
  MoveToCode,       /* start of a closed subpath */
  OpenCode,         /* start of an open subpath */
  GhostlineCode,    /* start of a synthetic closing segment */
  LineToCode,
  EndCode           /* terminator of a PathInfo array */
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory() aborts on allocation failure, so no NULL check
     is required here; GetDrawInfo() fills in the defaults. */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Deep copy: every string, pattern image, dash pattern and gradient stop
    list is duplicated so the clone owns all of its resources.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash pattern is a 0.0-terminated array; count its entries.  The
         clone is over-allocated (2*x+2) and zero-filled so the terminator is
         always present. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t)
        (2*x+2),sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /* NOTE(review): exception message below says "dash pattern" but this
         is the gradient-stop allocation -- looks copy-pasted; confirm. */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPathToPolygon returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator for EdgeInfo: orders edges by their first point (y, then
  x), then by the cross product of their initial segments, then by their
  second point.  Each argument must have at least 2 points.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Dump a PolygonInfo to the debug log (DrawEvent). */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"        %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end active-edge");
}

/* Reverse points[0..number_points-1] in place. */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into y-monotonic edges (direction flips start a new edge), reverse
    upward edges so points always run downward, and finally sort the edges
    with DrawCompareEdges.

    NOTE(review): on allocation failure this function returns NULL without
    freeing polygon_info (and any points/edges already acquired) -- the
    error paths leak; confirm whether callers rely on this and fix upstream.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) ||
        (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge accumulated so far (if any) and start a
          fresh point buffer at this location.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue :
          MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Direction is +1 when the segment heads downward (or level
      and rightward), -1 otherwise; a direction change splits the edge so
      every stored edge is y-monotonic.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: flush the current run, then seed the next run with the
          shared vertex so edges stay connected.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final edge (a single stray point is simply dropped). */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/

/* Dump a PathInfo array to the debug log (DrawEvent). */
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,       /* first point of the current subpath */
    q;       /* previous emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.  Each
    open subpath is terminated with a ghostline segment back to its first
    point; the buffer is sized 3*i+1 to leave room for those extra entries.
  */
  switch (primitive_info->primitive)
  {
    /* Non-path primitives have no vector representation. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Shrink the buffer to the n+1 entries actually used. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /* Release every resource owned by the structure, invalidate the
     signature to catch use-after-free, then free the structure itself
     (returning NULL). */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  /*
    Release the edge's point list, then close the gap in the edge array by
    shifting the trailing edges down one slot.
  */
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  remaining=polygon_info->number_edges;
  if (edge != remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      ssize_t
        j;

      /*
        Free each edge's point list before releasing the edge array itself.
      */
      for (j=0; j < (ssize_t) polygon_info->number_edges; j++)
        polygon_info->edges[j].points=(PointInfo *) RelinquishMagickMemory(
          polygon_info->edges[j].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip scanline y of the transform's destination span against the source's
  columns and rows mapped through the (inverse) affine; returns a possibly
  narrowed [x1,x2] span.  An empty result is signalled by x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Degenerate horizontal scale and z out of range: empty span. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* NOTE(review): this branch sets x2=edge->x2 while the analogous
             columns branch above sets x2=edge->x1 -- asymmetry looks
             suspicious; confirm intended behavior before changing. */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/* Invert a 2x3 affine matrix (scale/rotate part plus translation). */
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  /* PerceptibleReciprocal() guards against a (near-)singular matrix. */
  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: transform the source's four corners and take
    their min/max.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: walk destination scanlines, map each pixel back
    through the inverse affine, interpolate the source there, and composite
    over the destination.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline entirely outside the transformed source */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented below but otherwise unused */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
         floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      /* NOTE(review): status is shared across OpenMP threads and written
         here without synchronization; confirm this follows the codebase's
         usual "best effort" status convention. */
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DrawBoundingRectangles() is a developer debugging aid: it strokes the
  bounding box of every polygon edge (stroke "#f00" when the edge's
  direction flag is non-zero, "#0f0" otherwise -- the exact direction
  semantics are defined by the polygon rasterizer; verify there) and then
  strokes the overall polygon bounds in "#00f".  Rectangles are expanded by
  half the (resolution-scaled) stroke width so the outline clears the
  geometry it marks.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries an explicit density.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    mid = half the stroke width, scaled by resolution and the affine's
    expansion factor; used to pad every rectangle.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, padded by mid and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        Stroke one rectangle per edge, colored by the edge's direction flag.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* the per-edge loop broke early: clean up and report */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Stroke the overall bounds in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /*
    Look up the MVG clip path stored as an image artifact under `id', render
    it into a mask image, and install that mask as the image's write mask.
  */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Renders the MVG text in `clip_path' onto a transparent canvas the size of
  `image', then separates the alpha channel into a grayscale mask and negates
  it.  Returns the mask image, or NULL on failure (the partially built mask
  is destroyed on any error path).  Caller owns and must destroy the result.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /*
    Start from a fully transparent canvas; the rendered path supplies the
    opaque (kept) region.
  */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the path with white fill and transparent stroke so only coverage
    ends up in the alpha channel.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Extract alpha as a grayscale image and negate it into mask polarity.
  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Same recipe as DrawClippingMask() but targets the composite pixel mask and
  does not set clip_path mode.  Caller owns and must destroy the result.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    White fill, transparent stroke: coverage lands in the alpha channel.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w D a s h
P o l y g o n                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* next free slot in dash_polygon */
    n;  /* current index into draw_info->dash_pattern */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices of the incoming primitive (terminated by
    UndefinedPrimitive).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    2*vertices+32 gives headroom for the dash segments generated below;
    presumably sized to the worst case -- confirm against the emit loop.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash-offset: walk the pattern until the offset is used up,
    leaving n at the pattern entry (and length at the remainder) where
    drawing starts.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, slicing it into dash/gap pieces.  Odd pattern
    indices (n & 0x01) are gaps: they restart the staging buffer; even
    indices accumulate stroke vertices and flush via DrawStrokePolygon().
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (MaxBezierCoordinates >> 2))
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted: advance (wrap on a zero entry) */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* gap: reset the staging polygon to start at the gap's end */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* dash: append the dash end point and stroke the piece */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unused remainder of this segment into the next one */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush any trailing dash piece left in the staging buffer.
  */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Maps pixel (x,y) to a position along the gradient.  Linear: the scalar
  projection of (x,y)-v1 onto the gradient vector (in pixels; the caller
  divides by the vector length to normalize).  Radial: the distance from the
  center, scaled by the per-axis radii and rotated by the gradient angle
  (plain Euclidean distance for RepeatSpread).
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      /* offset = (p.q)/|p| -- the |q| factors cancel */
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort() comparator: orders gradient stops by ascending offset; offsets
  within MagickEpsilon compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* stops must be sorted by offset for the interval search below */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /*
        For each spread mode: compute the normalized offset, find the
        bracketing pair of stops, and blend their colors.
      */
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* outside [0,1] the gradient clamps to the first/last stop */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* fold the offset back and forth across [0,1] */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /* tile the gradient; antialias the seam between repetitions */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the seam, blend last stop into first stop */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Ensures *mvg_info->primitive_info can hold `pad' more primitives past the
  current offset, growing it if needed.  On allocation failure it throws,
  frees the old buffer, and installs a minimal zeroed buffer so callers can
  unwind safely; returns MagickFalse in that case.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /* size computed in double to detect overflow before the cast */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* mark the newly grown tail as unused */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  Locale-independent string-to-double with range clamping: NaN or values
  outside roughly +/-(SSIZE_MAX-512) yield 0.0.  *sentinal is advanced by
  InterpretLocaleValue() to the first unparsed character; callers detect
  parse failure by comparing it against `string'.
*/
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  char
    **magick_restrict q;

  double
    value;

  q=sentinal;
  value=InterpretLocaleValue(string,q);
  if ((IsNaN(value) != 0) || (value < -(SSIZE_MAX-512.0)) ||
      (value > (SSIZE_MAX-512.0)))
    return(0.0);
  sentinal=q;
  return(value);
}

/*
  Splay-tree key comparator for MVG macro names (plain strcmp ordering).
*/
static int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

/*
  Scans an MVG stream for named push/pop blocks (e.g. `push graphic-context
  "wheel" ... pop') and returns a splay tree mapping each name to the MVG
  text between the matching push/pop pair.  Returns NULL when `primitive'
  is NULL; caller owns the returned tree.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            /*
              Track push/pop nesting; the macro body ends when the depth
              returns to zero.
            */
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=GetDrawValue(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); 
} if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { 
(void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if 
(graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); 
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) 
ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; 
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || 
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; if (coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; 
if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info, ExpandAffine(&graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        Free any text payloads attached to text/image primitives before
        releasing the primitive array itself.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /* unwind the graphic-context stack down to and including level 0 */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage(): thin public wrapper — render draw_info's MVG content onto
  image at recursion depth 0.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D r a w P a t t e r n P a t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    Look up the pattern's MVG path and its geometry from the image artifacts;
    both must have been registered or there is nothing to render.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh canvas of the requested
    geometry, backed by a fully transparent background color.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render with a clone of draw_info; the clone's inherited fill/stroke
    patterns are dropped first — presumably to avoid a pattern referencing
    itself recursively (NOTE(review): confirm intent against callers).
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < 
(ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon; distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
*/
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* accumulate stroke coverage from squared distance to the edge */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* whole edge is left of (x,y): it crosses the ray exactly once */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross-product sign test: is (x,y) right of the directed segment? */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd fill rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

/*
  DrawPolygonPrimitive(): scan-convert the primitive's polygon (or line)
  onto image, filling and/or stroking each covered pixel.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
    MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* union of all edge bounds gives the polygon's bounding box */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by half the stroke width + 1 so the stroke is not clipped */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render region to the image raster */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    /* each OpenMP thread uses its own polygon rasterizer state */
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialiasing: threshold coverage to hard 0/1 */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ?
          1.0 : 0.0;
        }
      /* composite fill first, then stroke on top of it */
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  LogPrimitiveInfo(): write a human-readable trace of a primitive to the
  debug log.  The table below is indexed by the primitive's paint method;
  the final "?" entry is the out-of-range fallback.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," 
end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status&=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; 
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; 
target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); status&=SetImageInfo(clone_info,0,exception); if ((LocaleNCompare(clone_info->magick,"http",4) == 0) || (LocaleCompare(clone_info->magick,"mpri") == 0)) (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) 
ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. */ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); 
status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status&=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. 
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  DrawRoundLinecap() stamps a tiny 4-vertex polygon at a path endpoint; when
  rendered with the caller's stroke settings this fills in the round cap.
  The four vertices are copies of the endpoint nudged apart by 2*MagickEpsilon
  so the polygon is non-degenerate.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /* Seed the first four vertices from the endpoint primitive. */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  /* Perturb copies by a sub-pixel amount to form a minimal quad. */
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  /*
    Terminator: only the primitive field of linecap[4] is set; presumably
    DrawPolygonPrimitive stops at UndefinedPrimitive without reading the
    other fields — TODO(review) confirm.
  */
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: each stroked subpath is converted into a filled
    polygon (via TraceStrokePolygon) and rendered with a clone of draw_info
    whose fill is the caller's stroke.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Paint the stroke outline as a fill: fill color/pattern <- stroke. */
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  /* Disable stroking on the clone so only the fill is rendered. */
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* Walk the primitive list one subpath at a time. */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    /* status is a MagickStatusType bit-accumulator: any failure sticks. */
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last vertex of this subpath */
    closed_path=p->closed_subpath;
    /* Open subpaths with round caps get an explicit cap at each end. */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Zero everything, then set the diagonal: identity transform. */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent white stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Image options (e.g. -define / -set values) override the defaults. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* "weight" may be a keyword (e.g. bold) or a bare number. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the
%  permutation of the (n,k), i.e. the binomial coefficient n!/(k!*(n-k)!).
%
%  The format of the Permutate method is:
%
%      double Permutate(const ssize_t n,const ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the number of items.
%
%    o k: the number of items selected.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  /* r = n!/k!, then divide by (n-k)! to get C(n,k). */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() approximates an arc with an ellipse whose center is the midpoint
  of the start/end segment and whose radii are the half-extents of that
  segment.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath() renders an SVG-style elliptical arc (path 'A'/'a' command)
  as a sequence of cubic Bezier segments, one per quarter turn.  Degenerate
  inputs fall back to a point or straight line.  On success the traced
  primitives are appended at mvg_info->offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Zero-length arc degenerates to a point. */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* Zero radius degenerates to a straight line (per SVG arc rules). */
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* Transform to the ellipse's rotated coordinate frame. */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* Scale radii up when they are too small to span the endpoints. */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* Select the arc center consistent with the sweep/large-arc flags. */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* One Bezier segment per (at most) quarter turn. */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /* gamma is the standard cubic-Bezier control-point magnitude. */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* Chain segments: first starts at 'start', later ones continue on. */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* Rotate/scale the unit-circle control points back to image space. */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final segment exactly to the endpoint */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate the primitive array; re-resolve p. */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type backwards over every traced coordinate. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceBezier() evaluates a Bezier curve of degree number_coordinates-1
  (control points already stored at mvg_info->offset) and replaces it with a
  polyline of sample points.  The sample count scales with the control
  polygon's extent, capped at BezierQuantum per coordinate.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients: quantum grows to the largest pairwise control-
    point distance so flatter sampling is used for larger curves.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate; re-resolve the base pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points via the Bernstein form: coefficients are the
    binomial terms, alpha accumulates weight^j*(1-weight)^(n-1-j).
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* Close the sampling with the exact curve endpoint. */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  TraceCircle() traces a full circle centered at 'start' whose radius is the
  distance from 'start' to 'end'.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  TraceEllipse() samples an elliptical arc (arc.x..arc.y degrees) into a
  polyline; step size adapts to the larger radius so big ellipses stay
  smooth.  Marks the subpath closed when the endpoints coincide.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Degenerate radius: trace nothing. */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* Normalize the end angle so the sweep is non-negative. */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the exact terminal angle so the arc ends precisely at arc.y. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* A full sweep ends where it began: mark the subpath closed. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceLine() traces a two-point line segment; coincident endpoints collapse
  to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath() parses an SVG-style path string (M/L/C/Q/S/T/A/H/V/Z commands
  and their relative lowercase forms) and traces the result into the MVG
  primitive array.  Returns the number of coordinates traced, or -1 on error.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
*/ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? 
y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? 
x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. 
*/ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? 
y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. 
*/ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo 
end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { 
p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((pad_p) > MaxBezierCoordinates) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } 
\ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. */ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? 
MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. 
*/ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) 
((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(MaxStrokePad,MaxStrokePad); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; 
stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
SpatialMaxUnpooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialMaxUnpooling.c"
#else

/*
 * Scatter one frame (nslices x iheight x iwidth) of input values into a
 * zero-initialized output frame (nslices x oheight x owidth), placing each
 * input element at the flat offset recorded for it in `ind_p` by a
 * preceding max-pooling pass (indices are per-slice, TH_INDEX_BASE-based).
 *
 * THError must not be raised from inside an OpenMP parallel region, so an
 * out-of-range index is recorded under a critical section and reported once
 * after the parallel loop completes; the offending element is skipped.
 */
static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(scalar_t *input_p, scalar_t *output_p,
                                                          THIndex_t *ind_p,
                                                          int nslices,
                                                          int iwidth, int iheight,
                                                          int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    scalar_t *output_p_k = output_p + k*owidth*oheight;
    scalar_t *input_p_k = input_p + k*iwidth*iheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for (i = 0; i < iheight; i++)
    {
      for (j = 0; j < iwidth; j++)
      {
        /* retrieve position of max */
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE;
        if (maxp < 0 || maxp >= owidth*oheight)
        {
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        }
        else
        {
          output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */
        }
      }
    }
  }
  if (has_error)
  {
    /* THIndex_t is 64-bit; cast and use %lld so the format specifier matches
       the argument on all platforms (plain %ld is wrong on LLP64/Windows). */
    THError("found an invalid max index %lld (output volumes are of size %dx%d)",
        (long long)error_index, oheight, owidth);
  }
}

/*
 * Forward pass of spatial max-unpooling.
 * input:   (nslices, iheight, iwidth) or batch-mode (nbatch, nslices, iheight, iwidth)
 * indices: same shape as input, flat per-slice positions produced by max-pooling
 * output:  resized here to (nslices, oheight, owidth) or
 *          (nbatch, nslices, oheight, owidth) and zero-filled before scattering.
 */
void THNN_(SpatialMaxUnpooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *input_data;
  scalar_t *output_data;
  THIndex_t *indices_data;

  AT_CHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input, but got sizes: ", input->sizes());
  THNN_CHECK_SHAPE_INDICES(input, indices);

  if (input->dim() == 4)
  {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* get contiguous input and indices */
  input = THTensor_(newContiguous)(input);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize output */
  if (input->dim() == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data,
                                                  indices_data,
                                                  nslices,
                                                  iwidth, iheight,
                                                  owidth, oheight);
  }
  else
  {
    int p;

    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    for (p = 0; p < nbatch; p++)
    {
      /* promote to size_t before multiplying: the int product
         p*nslices*iwidth*iheight overflows for tensors with > 2^31 elements */
      THNN_(SpatialMaxUnpooling_updateOutput_frame)(
          input_data + (size_t)p*nslices*iwidth*iheight,
          output_data + (size_t)p*nslices*owidth*oheight,
          indices_data + (size_t)p*nslices*iwidth*iheight,
          nslices,
          iwidth, iheight,
          owidth, oheight);
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(input);
  THIndexTensor_(free)(indices);
}

/*
 * Gather one frame of gradients: gradInput[s,i,j] = gradOutput[s, indices[s,i,j]].
 * An out-of-range index aborts via THError (NOTE(review): unlike the forward
 * frame above, this raises from inside the parallel region — pre-existing
 * behavior, kept as-is; THError does not return).
 */
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p,
                                                             THIndex_t *ind_p,
                                                             int nslices,
                                                             int iwidth, int iheight,
                                                             int owidth, int oheight)
{
  int k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for (i = 0; i < iheight; i++)
    {
      for (j = 0; j < iwidth; j++)
      {
        /* retrieve position of max */
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE;
        if (maxp < 0 || maxp >= owidth * oheight)
        {
          /* cast: THIndex_t is 64-bit, %ld would be UB on LLP64 */
          THError("invalid max index %lld, owidth= %d, oheight= %d",
              (long long)maxp, owidth, oheight);
        }
        gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
      }
    }
  }
}

/*
 * Backward pass of spatial max-unpooling: routes each gradOutput element back
 * to the input position whose value was scattered there in the forward pass.
 * gradInput is resized to match input and zero-filled first.
 */
void THNN_(SpatialMaxUnpooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;

  THNN_CHECK_SHAPE_INDICES(input, indices);

  /* get contiguous gradOutput and indices */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 4)
  {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  if (owidth != gradOutput->size(dimw) || oheight != gradOutput->size(dimh))
  {
    /* size() returns int64_t; cast to int to match the %d specifiers
       (passing int64_t for %d is undefined behavior) */
    THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d",
        oheight, owidth, (int)gradOutput->size(dimh), (int)gradOutput->size(dimw));
  }

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                     indices_data,
                                                     nslices,
                                                     iwidth, iheight,
                                                     owidth, oheight);
  }
  else
  {
    int p;
    for (p = 0; p < nbatch; p++)
    {
      /* size_t promotion guards against int overflow in the offset product */
      THNN_(SpatialMaxUnpooling_updateGradInput_frame)(
          gradInput_data + (size_t)p*nslices*iwidth*iheight,
          gradOutput_data + (size_t)p*nslices*owidth*oheight,
          indices_data + (size_t)p*nslices*iwidth*iheight,
          nslices,
          iwidth, iheight,
          owidth, oheight);
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(gradOutput);
  THIndexTensor_(free)(indices);
}

#endif
yescrypt-simd.c
/*- * Copyright 2009 Colin Percival * Copyright 2012-2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ /* * On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding * gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX * and XOP are of further help either way. 
*/ #ifndef __SSE4_1__ #warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance" #endif #include <emmintrin.h> #ifdef __XOP__ #include <x86intrin.h> #endif #include <errno.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include "sha256_Y.h" #include "sysendian.h" #include "yescrypt.h" #include "yescrypt-platform.c" #if __STDC_VERSION__ >= 199901L /* have restrict */ #elif defined(__GNUC__) #define restrict __restrict #else #define restrict #endif #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint)); #define PREFETCH_OUT(x, hint) /* disabled */ #ifdef __XOP__ #define ARX(out, in1, in2, s) \ out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s)); #else #define ARX(out, in1, in2, s) \ { \ __m128i T = _mm_add_epi32(in1, in2); \ out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \ out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \ } #endif #define SALSA20_2ROUNDS \ /* Operate on "columns" */ \ ARX(X1, X0, X3, 7) \ ARX(X2, X1, X0, 9) \ ARX(X3, X2, X1, 13) \ ARX(X0, X3, X2, 18) \ \ /* Rearrange data */ \ X1 = _mm_shuffle_epi32(X1, 0x93); \ X2 = _mm_shuffle_epi32(X2, 0x4E); \ X3 = _mm_shuffle_epi32(X3, 0x39); \ \ /* Operate on "rows" */ \ ARX(X3, X0, X1, 7) \ ARX(X2, X3, X0, 9) \ ARX(X1, X2, X3, 13) \ ARX(X0, X1, X2, 18) \ \ /* Rearrange data */ \ X1 = _mm_shuffle_epi32(X1, 0x39); \ X2 = _mm_shuffle_epi32(X2, 0x4E); \ X3 = _mm_shuffle_epi32(X3, 0x93); /** * Apply the salsa20/8 core to the block provided in (X0 ... X3). 
 */
#define SALSA20_8_BASE(maybe_decl, out) \
	{ \
		maybe_decl Y0 = X0; \
		maybe_decl Y1 = X1; \
		maybe_decl Y2 = X2; \
		maybe_decl Y3 = X3; \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
		(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
		(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
		(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
	}

#define SALSA20_8(out) \
	SALSA20_8_BASE(__m128i, out)

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
	X0 = _mm_xor_si128(X0, Z0); \
	X1 = _mm_xor_si128(X1, Z1); \
	X2 = _mm_xor_si128(X2, Z2); \
	X3 = _mm_xor_si128(X3, Z3); \
	SALSA20_8_BASE(maybe_decl, out)

#define SALSA20_8_XOR_MEM(in, out) \
	SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)

/* Variant used when the xor operands already live in registers Y0..Y3. */
#define SALSA20_8_XOR_REG(out) \
	SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)

/* One 64-byte salsa20 block, addressable either as 16 32-bit words or as
 * four 128-bit SSE registers. */
typedef union {
	uint32_t w[16];
	__m128i q[4];
} salsa20_blk_t;

/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	/* Prefetch the whole input; output prefetch is compiled out. */
	r--;
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}

/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge.  Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX.  Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver).  Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput or/and not needing a move instruction, we
 * currently use it despite of the higher latency on some older CPUs.  As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
#if 1
#define HI32(X) \
	_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
	_mm_srli_si128((X), 4)
#else
#define HI32(X) \
	_mm_srli_epi64((X), 32)
#endif

/* Select the safest way to extract the low 64 bits of an __m128i given the
 * compiler/target combination (several compilers historically had bugs in
 * these intrinsics). */
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif

/* This is tunable */
#define S_BITS 8

/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4

/* Number of S-boxes.  Not tunable by design, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable except via S_BITS above.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	/* No S-boxes: fall back to classic scrypt BlockMix. */
	if (!S) {
		blockmix_salsa8(Bin, Bout, r);
		return;
	}

	/* The S-box region is split into two halves, S0 and S1. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

	/* X <-- B_{r1 - 1} */
	X0 = Bin[r].q[0];
	X1 = Bin[r].q[1];
	X2 = Bin[r].q[2];
	X3 = Bin[r].q[3];

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)
}

/* Load X0..X3 with the xor of two input blocks. */
#define XOR4_2(in1, in2) \
	X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
	X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
	X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
	X3 = _mm_xor_si128((in1)[3], (in2)[3]);

/*
 * Like blockmix_salsa8(), but the block processed is Bin1 xor Bin2.
 * Bin2_in_ROM selects non-temporal prefetch hints for Bin2 (used when Bin2
 * lives in a large shared ROM that should not pollute the cache).
 * Returns the low 32 bits of the final X0 (used by callers as Integerify).
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	} else {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	}
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q)
	SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q)
	SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}

/*
 * Like blockmix(), but the block processed is Bin1 xor Bin2.
 * Returns the low 32 bits of the final X0.
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r], _MM_HINT_NTA)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_NTA)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	} else {
		PREFETCH(&Bin2[r], _MM_HINT_T0)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_T0)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin1[i].q)
		XOR4(Bin2[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q)
	XOR4(Bin2[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}

/* Redefine XOR4 for the "_save" variants: xor into the output block as well
 * (writes the xor result back through 'out' while keeping it in Y0..Y3). */
#undef XOR4
#define XOR4(in, out) \
	(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
	(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
	(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
	(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

/*
 * Like blockmix_salsa8_xor(), but also saves Bin1 xor Bin2 back into Bin2
 * (the YESCRYPT_RW "write back" optimization).  Returns low 32 bits of X0.
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}

/* Fold the Y registers (set by the saving XOR4 above) into X0..X3. */
#define XOR4_Y \
	X0 = _mm_xor_si128(X0, Y0); \
	X1 = _mm_xor_si128(X1, Y1); \
	X2 = _mm_xor_si128(X2, Y2); \
	X3 = _mm_xor_si128(X3, Y3);

/*
 * Like blockmix_xor(), but also saves Bin1 xor Bin2 back into Bin2.
 * Returns low 32 bits of the final X0.
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}

/* NOTE(review): PWXFORM_SIMD_1 and PWXFORM_SIMD_2 are never defined in this
 * file (only PWXFORM_SIMD is); undef'ing an undefined macro is harmless,
 * these two lines appear to be leftovers from another yescrypt revision. */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	/* Word 0 of the last 64-byte sub-block; the w[] view is already
	 * little-endian 32-bit words (see the le32dec loads in smix1/smix2). */
	return B[2 * r - 1].w[0];
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 128r bytes in length.  The value N must be even and no
 * smaller than 2.  The array V must be aligned to a multiple of 64 bytes, and
 * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
 * bytes as well saves cache lines, but might result in cache bank conflicts).
 */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = V, * Y;
	uint32_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* The (i * 5 % 16) index undoes/applies the salsa20 word shuffle. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	if (NROM && (VROM_mask & 1)) {
		/* ROM-enabled variant: periodically mix in blocks from VROM. */
		uint32_t n;
		salsa20_blk_t * V_n;
		const salsa20_blk_t * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		X = &V[2 * s];
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);
			V_j = &VROM[j * s];

			/* X <-- H(X \xor VROM_j) */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
		} else {
			/* X <-- H(X) */
			blockmix(Y, X, r, S);
			j = integerify(X, r);
		}

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				Y = &V_n[i * s];
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				if (((n + i) & VROM_mask) == 1) {
					/* j <-- Integerify(X) mod NROM */
					j &= NROM - 1;
					V_j = &VROM[j * s];
				} else {
					/* j <-- Wrap(Integerify(X), i) */
					j &= n - 1;
					j += i;
					V_j = &V[j * s];
				}

				/* X <-- H(X \xor VROM_j) */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 1, S);
			}
		}

		/* 'n' overshot N in the loop above; step back one doubling. */
		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		if (((N - 1) & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j &= NROM - 1;
			V_j = &VROM[j * s];
		} else {
			/* j <-- Wrap(Integerify(X), i) */
			j &= n - 1;
			j += N - 1 - n;
			V_j = &V[j * s];
		}

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 1, S);
	} else if (flags & YESCRYPT_RW) {
		/* yescrypt read-write variant: extra xors against earlier V
		 * blocks chosen by Wrap(Integerify(X), i). */
		uint32_t n;
		salsa20_blk_t * V_n, * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		X = &V[2 * s];
		blockmix(Y, X, r, S);
		j = integerify(X, r);

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				Y = &V_n[i * s];

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 0, S);
			}
		}

		/* 'n' overshot N in the loop above; step back one doubling. */
		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 1 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 0, S);
	} else {
		/* Classic scrypt: fill V sequentially, ping-ponging X and Y
		 * so each blockmix writes directly into the next V slot. */
		/* 2: for i = 0 to N - 1 do */
		for (i = 1; i < N - 1; i += 2) {
			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			Y = &V[i * s];
			blockmix(X, Y, r, S);

			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			X = &V[(i + 1) * s];
			blockmix(Y, X, r, S);
		}

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[i * s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		X = XY;
		blockmix(Y, X, r, S);
	}

	/* B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r bytes in length.  The value N must be a power of 2
 * greater than 1.  The value Nloop must be even.  The array V must be aligned
 * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
 * bytes (aligning them to 64 bytes as well saves cache lines, but might result
 * in cache bank conflicts).
 */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = XY, * Y = &XY[s];
	uint64_t i;
	uint32_t j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* 3: V_i <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	i = Nloop / 2;

	/* 7: j <-- Integerify(X) mod N */
	j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
	if (NROM && (flags & YESCRYPT_RW)) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor_save(X, V_j, Y, r, S);

			if (((i + 1) & VROM_mask) == 1) {
				const salsa20_blk_t * VROM_j;

				j &= NROM - 1;
				VROM_j = &VROM[j * s];

				/* X <-- H(X \xor VROM_j) */
				/* 7: j <-- Integerify(X) mod N */
				j = blockmix_xor(Y, VROM_j, X, r, 1, S);
			} else {
				j &= N - 1;
				V_j = &V[j * s];

				/* 8: X <-- H(X \xor V_j) */
				/* V_j <-- Xprev \xor V_j */
				/* j <-- Integerify(X) mod NROM */
				j = blockmix_xor_save(Y, V_j, X, r, S);
			}
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor(X, V_j, Y, r, 0, S);

			if (((i + 1) & VROM_mask) == 1) {
				j &= NROM - 1;
				V_j = &VROM[j * s];
			} else {
				j &= N - 1;
				V_j = &V[j * s];
			}

			/* X <-- H(X \xor VROM_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (flags & YESCRYPT_RW) {
		/* 6: for i = 0 to N - 1 do */
		do {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(X, V_j, Y, r, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(Y, V_j, X, r, S);
			j &= N - 1;
		} while (--i);
	} else {
		/* 6: for i = 0 to N - 1 do */
		do {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(X, V_j, Y, r, 0, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 0, S);
			j &= N - 1;
		} while (--i);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
	uint64_t y;
	/* Clearing the lowest set bit repeatedly leaves the highest one. */
	while ((y = x & (x - 1)))
		x = y;
	return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage XY
 * must be 256r or 256rp bytes in length (the larger size is required with
 * OpenMP-enabled builds).  The value N must be a power of 2 greater than 1.
 * The array V must be aligned to a multiple of 64 bytes, and arrays B and
 * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
 * saves cache lines and helps avoid false sharing in OpenMP-enabled builds
 * when p > 1, but it might also result in cache bank conflicts).
 */
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	size_t s = 2 * r;
	uint32_t Nchunk = N / p;
	uint64_t Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the smix2 iteration count from the time parameter t. */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Portion of smix2 iterations done with read-write access to V. */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint32_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	for (i = 0; i < p; i++) {
		/* Each thread works on its own 1/p chunk of V. */
		uint32_t Vchunk = i * Nchunk;
		uint8_t * Bp = &B[128 * r * i];
		salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
		salsa20_blk_t * XYp = XY;
#endif
		/* Last chunk absorbs the rounding remainder of N / p. */
		uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
		/* Initialize this thread's S-boxes by running smix1 over them
		 * (pwxform disabled so the S-box contents are well-defined). */
		if (Sp)
			smix1(Bp, 1, S_SIZE_ALL / 128,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Remaining smix2 iterations run read-only over the entire V. */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
			salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
			salsa20_blk_t * XYp = XY;
#endif
			void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.  (This optimized implementation currently additionally
 * limits N to the range from 8 to 2^31, but other implementation might not.)
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error (with errno set to EINVAL, EFBIG, or
 * ENOMEM to indicate the failed parameter check or allocation).
 */
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t * B, * S;
	salsa20_blk_t * V, * XY;
	uint8_t sha256[32];

/*
 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
 * so don't let it have side-effects.  Without this adjustment, it'd
 * enable the SHA-256 password pre-hashing and output post-hashing,
 * because any deviation from classic scrypt implies those.
 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (N > UINT32_MAX) {
		errno = EFBIG;
		return -1;
	}
	if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
		errno = EINVAL;
		return -1;
	}
	if ((r > SIZE_MAX / 256 / p) ||
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / S_SIZE_ALL) {
		errno = ENOMEM;
		return -1;
	}

	/* Validate the optional ROM (shared region). */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (NROM > UINT32_MAX) {
			errno = EFBIG;
			return -1;
		}
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the reusable local region. */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (salsa20_blk_t *)local->aligned;
		need = 0;
	}
	/* Each 'need += x; if (need < x)' pair checks for size_t overflow. */
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t *)tmp.aligned;
		XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* Carve B, V, XY (and optionally S) out of one allocation. */
		B = (uint8_t *)local->aligned;
		V = (salsa20_blk_t *)((uint8_t *)B + B_size);
		XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint8_t *)XY + XY_size;

	/* Any non-classic mode pre-hashes the password with SHA-256. */
	if (t || flags) {
		SHA256_CTX_Y ctx;
		SHA256_Init_Y(&ctx);
		SHA256_Update_Y(&ctx, passwd, passwdlen);
		SHA256_Final_Y(sha256, &ctx);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

	if (t || flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			    &V[(size_t)2 * r * i * N],
			    NROM, shared,
			    &XY[(size_t)4 * r * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

/*
 * Except when computing classic scrypt, allow all computation so far
 * to be performed on the client.  The final steps below match those of
 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX_Y ctx;
			HMAC_SHA256_Init_Y(&ctx, buf, buflen);
#if 0
/* Proper yescrypt */
			HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
#else
/* GlobalBoost-Y buggy yescrypt */
/* NOTE(review): intentionally deviates from upstream yescrypt (which HMACs
 * the literal "Client Key"); kept for GlobalBoost-Y hash compatibility. */
			HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
#endif
			HMAC_SHA256_Final_Y(sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX_Y ctx;
			SHA256_Init_Y(&ctx);
			SHA256_Update_Y(&ctx, sha256, sizeof(sha256));
			SHA256_Final_Y(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
GB_binop__lor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_int32 // A.*B function (eWiseMult): GB_AemultB__lor_int32 // A*D function (colscale): GB_AxD__lor_int32 // D*A function (rowscale): GB_DxB__lor_int32 // C+=B function (dense accum): GB_Cdense_accumB__lor_int32 // C+=b function (dense accum): GB_Cdense_accumb__lor_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int32 // C=scalar+B GB_bind1st__lor_int32 // C=scalar+B' GB_bind1st_tran__lor_int32 // C=A+scalar GB_bind2nd__lor_int32 // C=A'+scalar GB_bind2nd_tran__lor_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice 
= NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB085-threadprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A file-scope variable used within a function called by a parallel region. Use threadprivate to avoid data races. */ #include <stdio.h> #include <assert.h> #include <omp.h> int sum0 = 0; int sum1 = 0; void foo(int i) { sum0 = sum0 + i; } int main() { int len = 1000; int i; int sum = 0; for (i = 0; i <= len - 1; i += 1) { foo(i); } sum = sum + sum0; /* reference calculation */ #pragma omp parallel for private (i) reduction (+:sum1) firstprivate (len) for (i = 0; i <= len - 1; i += 1) { sum1 = sum1 + i; } printf("sum=%d; sum1=%d\n",sum,sum1); (((void )(sizeof(((sum == sum1?1 : 0))))) , (( { if (sum == sum1) ; else __assert_fail("sum==sum1","DRB085-threadprivate-orig-no.c",75,__PRETTY_FUNCTION__); }))); return 0; }
DRB061-matrixvector1-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Matrix-vector multiplication: outer-level loop parallelization */ double a[100][100], v[100], v_out[100]; int init() { int i, j, k; int _ret_val_0; #pragma cetus private(i, j) #pragma loop name init#0 #pragma cetus parallel #pragma omp parallel for private(i, j) for (i=0; i<100; i ++ ) { #pragma cetus lastprivate(j) #pragma loop name init#0#0 #pragma cetus parallel #pragma omp parallel for lastprivate(j) for (j=0; j<100; j ++ ) { a[i][j]=(i*j); } v_out[i]=(i*j); v[i]=(i*j); } _ret_val_0=0; return _ret_val_0; } int mv() { int i, j; int _ret_val_0; #pragma cetus private(i, j) #pragma loop name mv#0 #pragma cetus parallel #pragma omp parallel for private(i, j) for (i=0; i<100; i ++ ) { float sum = 0.0; #pragma cetus private(j) #pragma loop name mv#0#0 #pragma cetus reduction(+: sum) #pragma cetus parallel #pragma omp parallel for private(j) reduction(+: sum) for (j=0; j<100; j ++ ) { sum+=(a[i][j]*v[j]); } v_out[i]=sum; } _ret_val_0=0; return _ret_val_0; } int print() { int i, j, k; int _ret_val_0; #pragma cetus private(i, j) #pragma loop name print#0 for (i=0; i<100; i ++ ) { #pragma cetus private(j) #pragma loop name print#0#0 for (j=0; j<100; j ++ ) { printf("%lf\n", a[i][j]); } printf("%lf\n", v_out[i]); printf("%lf\n", v[i]); } _ret_val_0=0; return _ret_val_0; } int main() { int _ret_val_0; init(); mv(); print(); _ret_val_0=0; return _ret_val_0; }
GB_binop__gt_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_08__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_02__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_04__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int64) // A*D function (colscale): GB (_AxD__gt_int64) // D*A function (rowscale): GB (_DxB__gt_int64) // C+=B function (dense accum): GB (_Cdense_accumB__gt_int64) // C+=b function (dense accum): GB (_Cdense_accumb__gt_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int64) // C=scalar+B GB (_bind1st__gt_int64) // C=scalar+B' GB (_bind1st_tran__gt_int64) // C=A+scalar GB (_bind2nd__gt_int64) // C=A'+scalar GB (_bind2nd_tran__gt_int64) // C type: bool // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_INT64 || GxB_NO_GT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__gt_int64) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__gt_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dcraw.c
#ifndef IGNOREALL /* dcraw.c -- Dave Coffin's raw photo decoder Copyright 1997-2015 by Dave Coffin, dcoffin a cybercom o net This is a command-line ANSI C program to convert raw photos from any digital camera on any computer running any operating system. No license is required to download and use dcraw.c. However, to lawfully redistribute dcraw, you must either (a) offer, at no extra charge, full source code* for all executable files containing RESTRICTED functions, (b) distribute this code under the GPL Version 2 or later, (c) remove all RESTRICTED functions, re-implement them, or copy them from an earlier, unrestricted Revision of dcraw.c, or (d) purchase a license from the author. The functions that process Foveon images have been RESTRICTED since Revision 1.237. All other code remains free for all uses. *If you have not modified dcraw.c in any way, a link to my homepage qualifies as "full source code". $Revision: 1.44 $ $Date: 2015/03/08 19:19:51 $ make -f Makefile.devel git commit -a -m "v.102" git push */ /*@out DEFINES #ifndef USE_JPEG #define NO_JPEG #endif #ifndef USE_JASPER #define NO_JASPER #endif @end DEFINES */ #define NO_LCMS #define DCRAW_VERBOSE //@out DEFINES #define DCRAW_VERSION "9.24" #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #define _USE_MATH_DEFINES #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <float.h> #include <limits.h> #include <math.h> #include <setjmp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/types.h> //@end DEFINES #if defined(DJGPP) || defined(__MINGW32__) #define fseeko fseek #define ftello ftell #else #define fgetc getc_unlocked #endif //@out DEFINES #ifdef __CYGWIN__ #include <io.h> #endif #ifdef WIN32 #include <sys/utime.h> #include <winsock2.h> #pragma comment(lib, "ws2_32.lib") #define snprintf _snprintf #define strcasecmp stricmp #define strncasecmp strnicmp //@end DEFINES typedef __int64 INT64; typedef unsigned __int64 UINT64; //@out DEFINES #else 
#include <unistd.h> #include <utime.h> #include <netinet/in.h> typedef long long INT64; typedef unsigned long long UINT64; #endif #ifdef NODEPS #define NO_JASPER #define NO_JPEG #define NO_LCMS #endif #ifndef NO_JASPER #include <jasper/jasper.h> /* Decode Red camera movies */ #endif #ifndef NO_JPEG #include <jpeglib.h> /* Decode compressed Kodak DC120 photos */ #endif /* and Adobe Lossy DNGs */ #ifndef NO_LCMS #ifdef USE_LCMS #include <lcms.h> /* Support color profiles */ #else #include <lcms2.h> /* Support color profiles */ #endif #endif #ifdef LOCALEDIR #include <libintl.h> #define _(String) gettext(String) #else #define _(String) (String) #endif #ifdef LJPEG_DECODE #error Please compile dcraw.c by itself. #error Do not link it with ljpeg_decode. #endif #ifndef LONG_BIT #define LONG_BIT (8 * sizeof (long)) #endif //@end DEFINES #if !defined(uchar) #define uchar unsigned char #endif #if !defined(ushort) #define ushort unsigned short #endif /* All global variables are defined here, and all functions that access them are prefixed with "CLASS". Note that a thread-safe C++ class cannot have non-const static local variables. 
*/ FILE *ifp, *ofp; short order; const char *ifname; char *meta_data, xtrans[6][6], xtrans_abs[6][6]; char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64],software[64]; float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len; time_t timestamp; off_t strip_offset, data_offset; off_t thumb_offset, meta_offset, profile_offset; unsigned shot_order, kodak_cbpp, exif_cfa, unique_id; unsigned thumb_length, meta_length, profile_length; unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0; unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress; unsigned black, maximum, mix_green, raw_color, zero_is_bad; unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error; unsigned tile_width, tile_length, gpsdata[32], load_flags; unsigned flip, tiff_flip, filters, colors; ushort raw_height, raw_width, height, width, top_margin, left_margin; ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height; ushort *raw_image, (*image)[4], cblack[4102]; ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4]; double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 }; float bright=1, user_mul[4]={0,0,0,0}, threshold=0; int mask[8][4]; int half_size=0, four_color_rgb=0, document_mode=0, highlight=0; int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=1; int output_color=1, output_bps=8, output_tiff=0, med_passes=0; int no_auto_bright=0; unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX }; float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4]; const double xyz_rgb[3][3] = { /* XYZ from RGB */ { 0.412453, 0.357580, 0.180423 }, { 0.212671, 0.715160, 0.072169 }, { 0.019334, 0.119193, 0.950227 } }; const float d65_white[3] = { 0.950456, 1, 1.088754 }; int histogram[4][0x2000]; void (*write_thumb)(), (*write_fun)(); void (*load_raw)(), (*thumb_load_raw)(); jmp_buf failure; struct decode { struct decode *branch[2]; int leaf; } first_decode[2048], *second_decode, *free_decode; struct tiff_ifd { int 
t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes; int t_tile_width, t_tile_length; } tiff_ifd[10]; struct ph1 { int format, key_off, tag_21a; int t_black, split_col, black_col, split_row, black_row; float tag_210; } ph1; #define CLASS //@out DEFINES #define FORC(cnt) for (c=0; c < cnt; c++) #define FORC3 FORC(3) #define FORC4 FORC(4) #define FORCC FORC(colors) #define SQR(x) ((x)*(x)) #define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31)) #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define LIM(x,min,max) MAX(min,MIN(x,max)) #define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y)) #define CLIP(x) LIM(x,0,65535) #define SWAP(a,b) { a=a+b; b=a-b; a=a-b; } #define my_swap(type, i, j) {type t = i; i = j; j = t;} /* In order to inline this calculation, I make the risky assumption that all filter patterns can be described by a repeating pattern of eight rows and two columns Do not use the FC or BAYER macros with the Leaf CatchLight, because its pattern is 16x16, not 2x8. 
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2 PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1 0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M 1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C 2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y 3 C Y C Y C Y 3 G M G M G M 3 G M G M G M 4 C Y C Y C Y 4 Y C Y C Y C PowerShot A5 5 G M G M G M 5 G M G M G M 0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y 7 M G M G M G 7 M G M G M G 0 1 2 3 4 5 0 C Y C Y C Y 1 G M G M G M 2 C Y C Y C Y 3 M G M G M G All RGB cameras use one of these Bayer grids: 0x16161616: 0x61616161: 0x49494949: 0x94949494: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G 1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B 2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G 3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B */ #define RAW(row,col) \ raw_image[(row)*raw_width+(col)] //@end DEFINES #define FC(row,col) \ (filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3) //@out DEFINES #define BAYER(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)] #define BAYER2(row,col) \ image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)] //@end DEFINES /* @out COMMON #include <math.h> #define CLASS LibRaw:: #include "libraw/libraw_types.h" #define LIBRAW_LIBRARY_BUILD #define LIBRAW_IO_REDEFINED #include "libraw/libraw.h" #include "internal/defines.h" #include "internal/var_defines.h" @end COMMON */ //@out COMMON int CLASS fcol (int row, int col) { static const char filter[16][16] = { { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 }, { 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 }, { 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 }, { 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 }, { 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 }, { 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 }, { 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 }, { 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 }, { 
2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 }, { 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 }, { 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 }, { 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 }, { 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 }, { 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 }, { 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 }, { 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } }; if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15]; if (filters == 9) return xtrans[(row+6) % 6][(col+6) % 6]; return FC(row,col); } #ifndef __GLIBC__ char *my_memmem (char *haystack, size_t haystacklen, char *needle, size_t needlelen) { char *c; for (c = haystack; c <= haystack + haystacklen - needlelen; c++) if (!memcmp (c, needle, needlelen)) return c; return 0; } #define memmem my_memmem char *my_strcasestr (char *haystack, const char *needle) { char *c; for (c = haystack; *c; c++) if (!strncasecmp(c, needle, strlen(needle))) return c; return 0; } #define strcasestr my_strcasestr #endif //@end COMMON void CLASS merror (void *ptr, const char *where) { if (ptr) return; fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where); longjmp (failure, 1); } void CLASS derror() { if (!data_error) { fprintf (stderr, "%s: ", ifname); if (feof(ifp)) fprintf (stderr,_("Unexpected end of file\n")); else fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp)); } data_error++; } //@out COMMON ushort CLASS sget2 (uchar *s) { if (order == 0x4949) /* "II" means little-endian */ return s[0] | s[1] << 8; else /* "MM" means big-endian */ return s[0] << 8 | s[1]; } // DNG was written by: #define CameraDNG 1 #define AdobeDNG 2 #ifdef LIBRAW_LIBRARY_BUILD static ushort saneSonyCameraInfo(uchar a, uchar b, uchar c, uchar d, uchar e, uchar f){ if ((a >> 4) > 9) return 0; else if ((a & 0x0f) > 9) return 0; else if ((b >> 4) > 9) return 0; else if ((b & 0x0f) > 9) return 0; else if ((c >> 4) > 9) return 0; else if ((c & 0x0f) > 9) return 0; else if ((d >> 4) > 9) return 0; else if ((d & 0x0f) > 9) return 0; else if ((e >> 4) > 9) return 0; else if ((e & 
0x0f) > 9) return 0; else if ((f >> 4) > 9) return 0; else if ((f & 0x0f) > 9) return 0; return 1; } static ushort bcd2dec(uchar data){ if ((data >> 4) > 9) return 0; else if ((data & 0x0f) > 9) return 0; else return (data >> 4) * 10 + (data & 0x0f); } static uchar SonySubstitution[257] = "\x00\x01\x32\xb1\x0a\x0e\x87\x28\x02\xcc\xca\xad\x1b\xdc\x08\xed\x64\x86\xf0\x4f\x8c\x6c\xb8\xcb\x69\xc4\x2c\x03\x97\xb6\x93\x7c\x14\xf3\xe2\x3e\x30\x8e\xd7\x60\x1c\xa1\xab\x37\xec\x75\xbe\x23\x15\x6a\x59\x3f\xd0\xb9\x96\xb5\x50\x27\x88\xe3\x81\x94\xe0\xc0\x04\x5c\xc6\xe8\x5f\x4b\x70\x38\x9f\x82\x80\x51\x2b\xc5\x45\x49\x9b\x21\x52\x53\x54\x85\x0b\x5d\x61\xda\x7b\x55\x26\x24\x07\x6e\x36\x5b\x47\xb7\xd9\x4a\xa2\xdf\xbf\x12\x25\xbc\x1e\x7f\x56\xea\x10\xe6\xcf\x67\x4d\x3c\x91\x83\xe1\x31\xb3\x6f\xf4\x05\x8a\x46\xc8\x18\x76\x68\xbd\xac\x92\x2a\x13\xe9\x0f\xa3\x7a\xdb\x3d\xd4\xe7\x3a\x1a\x57\xaf\x20\x42\xb2\x9e\xc3\x8b\xf2\xd5\xd3\xa4\x7e\x1f\x98\x9c\xee\x74\xa5\xa6\xa7\xd8\x5e\xb0\xb4\x34\xce\xa8\x79\x77\x5a\xc1\x89\xae\x9a\x11\x33\x9d\xf5\x39\x19\x65\x78\x16\x71\xd2\xa9\x44\x63\x40\x29\xba\xa0\x8f\xe4\xd6\x3b\x84\x0d\xc2\x4e\x58\xdd\x99\x22\x6b\xc9\xbb\x17\x06\xe5\x7d\x66\x43\x62\xf6\xcd\x35\x90\x2e\x41\x8d\x6d\xaa\x09\x73\x95\x0c\xf1\x1d\xde\x4c\x2f\x2d\xf7\xd1\x72\xeb\xef\x48\xc7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"; ushort CLASS sget2Rev(uchar *s) // specific to some Canon Makernotes fields, where they have endian in reverse { if (order == 0x4d4d) /* "II" means little-endian, and we reverse to "MM" - big endian */ return s[0] | s[1] << 8; else /* "MM" means big-endian... 
*/ return s[0] << 8 | s[1]; } #endif ushort CLASS get2() { uchar str[2] = { 0xff,0xff }; fread (str, 1, 2, ifp); return sget2(str); } unsigned CLASS sget4 (uchar *s) { if (order == 0x4949) return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24; else return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3]; } #define sget4(s) sget4((uchar *)s) unsigned CLASS get4() { uchar str[4] = { 0xff,0xff,0xff,0xff }; fread (str, 1, 4, ifp); return sget4(str); } unsigned CLASS getint (int type) { return type == 3 ? get2() : get4(); } float CLASS int_to_float (int i) { union { int i; float f; } u; u.i = i; return u.f; } double CLASS getreal (int type) { union { char c[8]; double d; } u,v; int i, rev; switch (type) { case 3: return (unsigned short) get2(); case 4: return (unsigned int) get4(); case 5: u.d = (unsigned int) get4(); v.d = (unsigned int)get4(); return u.d / (v.d ? v.d : 1); case 8: return (signed short) get2(); case 9: return (signed int) get4(); case 10: u.d = (signed int) get4(); v.d = (signed int)get4(); return u.d / (v.d?v.d:1); case 11: return int_to_float (get4()); case 12: rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234)); for (i=0; i < 8; i++) u.c[i ^ rev] = fgetc(ifp); return u.d; default: return fgetc(ifp); } } void CLASS read_shorts (ushort *pixel, int count) { if (fread (pixel, 2, count, ifp) < count) derror(); if ((order == 0x4949) == (ntohs(0x1234) == 0x1234)) swab ((char*)pixel, (char*)pixel, count*2); } void CLASS cubic_spline (const int *x_, const int *y_, const int len) { float **A, *b, *c, *d, *x, *y; int i, j; A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len); if (!A) return; A[0] = (float *) (A + 2*len); for (i = 1; i < 2*len; i++) A[i] = A[0] + 2*len*i; y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i)))); for (i = 0; i < len; i++) { x[i] = x_[i] / 65535.0; y[i] = y_[i] / 65535.0; } for (i = len-1; i > 0; i--) { b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]); d[i-1] = x[i] - x[i-1]; } for (i = 1; i < len-1; i++) { A[i][i] = 2 * 
(d[i-1] + d[i]); if (i > 1) { A[i][i-1] = d[i-1]; A[i-1][i] = d[i-1]; } A[i][len-1] = 6 * (b[i+1] - b[i]); } for(i = 1; i < len-2; i++) { float v = A[i+1][i] / A[i][i]; for(j = 1; j <= len-1; j++) A[i+1][j] -= v * A[i][j]; } for(i = len-2; i > 0; i--) { float acc = 0; for(j = i; j <= len-2; j++) acc += A[i][j]*c[j]; c[i] = (A[i][len-1] - acc) / A[i][i]; } for (i = 0; i < 0x10000; i++) { float x_out = (float)(i / 65535.0); float y_out = 0; for (j = 0; j < len-1; j++) { if (x[j] <= x_out && x_out <= x[j+1]) { float v = x_out - x[j]; y_out = y[j] + ((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v + (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v; } } curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 : (ushort)(y_out * 65535.0 + 0.5)); } free (A); } void CLASS canon_600_fixed_wb (int temp) { static const short mul[4][5] = { { 667, 358,397,565,452 }, { 731, 390,367,499,517 }, { 1119, 396,348,448,537 }, { 1399, 485,431,508,688 } }; int lo, hi, i; float frac=0; for (lo=4; --lo; ) if (*mul[lo] <= temp) break; for (hi=0; hi < 3; hi++) if (*mul[hi] >= temp) break; if (lo != hi) frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]); for (i=1; i < 5; i++) pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]); } /* Return values: 0 = white 1 = near white 2 = not white */ int CLASS canon_600_color (int ratio[2], int mar) { int clipped=0, target, miss; if (flash_used) { if (ratio[1] < -104) { ratio[1] = -104; clipped = 1; } if (ratio[1] > 12) { ratio[1] = 12; clipped = 1; } } else { if (ratio[1] < -264 || ratio[1] > 461) return 2; if (ratio[1] < -50) { ratio[1] = -50; clipped = 1; } if (ratio[1] > 307) { ratio[1] = 307; clipped = 1; } } target = flash_used || ratio[1] < 197 ? 
-38 - (398 * ratio[1] >> 10) : -123 + (48 * ratio[1] >> 10); if (target - mar <= ratio[0] && target + 20 >= ratio[0] && !clipped) return 0; miss = target - ratio[0]; if (abs(miss) >= mar*4) return 2; if (miss < -20) miss = -20; if (miss > mar) miss = mar; ratio[0] = target - miss; return 1; } void CLASS canon_600_auto_wb() { int mar, row, col, i, j, st, count[] = { 0,0 }; int test[8], total[2][8], ratio[2][2], stat[2]; memset (&total, 0, sizeof total); i = canon_ev + 0.5; if (i < 10) mar = 150; else if (i > 12) mar = 20; else mar = 280 - 20 * i; if (flash_used) mar = 80; for (row=14; row < height-14; row+=4) for (col=10; col < width; col+=2) { for (i=0; i < 8; i++) test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] = BAYER(row+(i >> 1),col+(i & 1)); for (i=0; i < 8; i++) if (test[i] < 150 || test[i] > 1500) goto next; for (i=0; i < 4; i++) if (abs(test[i] - test[i+4]) > 50) goto next; for (i=0; i < 2; i++) { for (j=0; j < 4; j+=2) ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j]; stat[i] = canon_600_color (ratio[i], mar); } if ((st = stat[0] | stat[1]) > 1) goto next; for (i=0; i < 2; i++) if (stat[i]) for (j=0; j < 2; j++) test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10; for (i=0; i < 8; i++) total[st][i] += test[i]; count[st]++; next: ; } if (count[0] | count[1]) { st = count[0]*200 < count[1]; for (i=0; i < 4; i++) pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]); } } void CLASS canon_600_coeff() { static const short table[6][12] = { { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 }, { -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 }, { -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 }, { -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 }, { -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } }; int t=0, i, c; float mc, yc; mc = pre_mul[1] / pre_mul[2]; yc = pre_mul[3] / pre_mul[2]; if (mc > 1 && 
mc <= 1.28 && yc < 0.8789) t=1; if (mc > 1.28 && mc <= 2) { if (yc < 0.8789) t=3; else if (yc <= 2) t=4; } if (flash_used) t=5; for (raw_color = i=0; i < 3; i++) FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0; } void CLASS canon_600_load_raw() { uchar data[1120], *dp; ushort *pix; int irow, row; for (irow=row=0; irow < height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data, 1, 1120, ifp) < 1120) derror(); pix = raw_image + row*raw_width; for (dp=data; dp < data+1120; dp+=10, pix+=8) { pix[0] = (dp[0] << 2) + (dp[1] >> 6 ); pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3); pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3); pix[3] = (dp[4] << 2) + (dp[1] & 3); pix[4] = (dp[5] << 2) + (dp[9] & 3); pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3); pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3); pix[7] = (dp[8] << 2) + (dp[9] >> 6 ); } if ((row+=2) > height) row = 1; } } void CLASS canon_600_correct() { int row, col, val; static const short mul[4][2] = { { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } }; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) { if ((val = BAYER(row,col) - black) < 0) val = 0; val = val * mul[row & 3][col & 1] >> 9; BAYER(row,col) = val; } } canon_600_fixed_wb(1311); canon_600_auto_wb(); canon_600_coeff(); maximum = (0x3ff - black) * 1109 >> 9; black = 0; } int CLASS canon_s2is() { unsigned row; for (row=0; row < 100; row++) { fseek (ifp, row*3340 + 3284, SEEK_SET); if (getc(ifp) > 15) return 1; } return 0; } unsigned CLASS getbithuff (int nbits, ushort *huff) { #ifdef LIBRAW_NOTHREADS static unsigned bitbuf=0; static int vbits=0, reset=0; #else #define bitbuf tls->getbits.bitbuf #define vbits tls->getbits.vbits #define reset tls->getbits.reset #endif unsigned c; if (nbits > 25) return 0; if (nbits < 0) return bitbuf = vbits = reset = 0; if (nbits == 0 || vbits < 0) return 0; while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF && !(reset = zero_after_ff && c == 
0xff && fgetc(ifp))) { bitbuf = (bitbuf << 8) + (uchar) c; vbits += 8; } c = bitbuf << (32-vbits) >> (32-nbits); if (huff) { vbits -= huff[c] >> 8; c = (uchar) huff[c]; } else vbits -= nbits; if (vbits < 0) derror(); return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #undef reset #endif } #define getbits(n) getbithuff(n,0) #define gethuff(h) getbithuff(*h,h+1) /* Construct a decode tree according the specification in *source. The first 16 bytes specify how many codes should be 1-bit, 2-bit 3-bit, etc. Bytes after that are the leaf values. For example, if the source is { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, then the code is 00 0x04 010 0x03 011 0x05 100 0x06 101 0x02 1100 0x07 1101 0x01 11100 0x08 11101 0x09 11110 0x00 111110 0x0a 1111110 0x0b 1111111 0xff */ ushort * CLASS make_decoder_ref (const uchar **source) { int max, len, h, i, j; const uchar *count; ushort *huff; count = (*source += 16) - 17; for (max=16; max && !count[max]; max--); huff = (ushort *) calloc (1 + (1 << max), sizeof *huff); merror (huff, "make_decoder()"); huff[0] = max; for (h=len=1; len <= max; len++) for (i=0; i < count[len]; i++, ++*source) for (j=0; j < 1 << (max-len); j++) if (h <= 1 << max) huff[h++] = len << 8 | **source; return huff; } ushort * CLASS make_decoder (const uchar *source) { return make_decoder_ref (&source); } void CLASS crw_init_tables (unsigned table, ushort *huff[2]) { static const uchar first_tree[3][29] = { { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, 0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff }, { 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0, 0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff }, { 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0, 0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff }, }; static const uchar second_tree[3][180] = { { 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139, 0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08, 0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0, 
0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42, 0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57, 0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9, 0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98, 0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6, 0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4, 0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7, 0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1, 0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64, 0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba, 0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4, 0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff }, { 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140, 0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06, 0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32, 0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51, 0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26, 0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59, 0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9, 0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99, 0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85, 0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8, 0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a, 0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9, 0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8, 0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8, 0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff }, { 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117, 0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08, 0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22, 0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34, 0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41, 0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48, 0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69, 
0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8, 0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94, 0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a, 0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6, 0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62, 0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5, 0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3, 0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff } }; if (table > 2) table = 2; huff[0] = make_decoder ( first_tree[table]); huff[1] = make_decoder (second_tree[table]); } /* Return 0 if the image starts with compressed data, 1 if it starts with uncompressed low-order bits. In Canon compressed data, 0xff is always followed by 0x00. */ int CLASS canon_has_lowbits() { uchar test[0x4000]; int ret=1, i; fseek (ifp, 0, SEEK_SET); fread (test, 1, sizeof test, ifp); for (i=540; i < sizeof test - 1; i++) if (test[i] == 0xff) { if (test[i+1]) return 1; ret=0; } return ret; } void CLASS canon_load_raw() { ushort *pixel, *prow, *huff[2]; int nblocks, lowbits, i, c, row, r, save, val; int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2]; crw_init_tables (tiff_compress, huff); lowbits = canon_has_lowbits(); if (!lowbits) maximum = 0x3ff; fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET); zero_after_ff = 1; getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row+=8) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; nblocks = MIN (8, raw_height-row) * raw_width >> 6; for (block=0; block < nblocks; block++) { memset (diffbuf, 0, sizeof diffbuf); for (i=0; i < 64; i++ ) { leaf = gethuff(huff[i > 0]); if (leaf == 0 && i) break; if (leaf == 0xff) continue; i += leaf >> 4; len = leaf & 15; if (len == 0) continue; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; if (i < 64) diffbuf[i] = diff; } diffbuf[0] += carry; carry = diffbuf[0]; 
for (i=0; i < 64; i++ ) { if (pnum++ % raw_width == 0) base[0] = base[1] = 512; if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10) derror(); } } if (lowbits) { save = ftell(ifp); fseek (ifp, 26 + row*raw_width/4, SEEK_SET); for (prow=pixel, i=0; i < raw_width*2; i++) { c = fgetc(ifp); for (r=0; r < 8; r+=2, prow++) { val = (*prow << 2) + ((c >> r) & 3); if (raw_width == 2672 && val < 512) val += 2; *prow = val; } } fseek (ifp, save, SEEK_SET); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { FORC(2) free (huff[c]); throw; } #endif FORC(2) free (huff[c]); } //@end COMMON /* Not a full implementation of Lossless JPEG, just enough to decode Canon, Kodak and Adobe DNG images. */ struct jhead { int bits, high, wide, clrs, sraw, psv, restart, vpred[6]; ushort *huff[6], *free[4], *row; }; //@out COMMON int CLASS ljpeg_start (struct jhead *jh, int info_only) { int c, tag, len; uchar data[0x10000]; const uchar *dp; memset (jh, 0, sizeof *jh); jh->restart = INT_MAX; fread (data, 2, 1, ifp); if (data[1] != 0xd8) return 0; do { fread (data, 2, 2, ifp); tag = data[0] << 8 | data[1]; len = (data[2] << 8 | data[3]) - 2; // printf ("\n*** ljpeg_start pos= %llx tag= %x, len= %d", ftell(ifp)-4, tag, len); if (tag <= 0xff00) return 0; fread (data, 1, len, ifp); switch (tag) { case 0xffc3: // start of frame; lossless, Huffman jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3; // printf ("\n*** %x: startraw= %d", tag, jh->sraw); case 0xffc0: // start of frame; baseline jpeg jh->bits = data[0]; jh->high = data[1] << 8 | data[2]; jh->wide = data[3] << 8 | data[4]; jh->clrs = data[5] + jh->sraw; if (!strcmp(model, "EOS 5DS")) { jh->wide = data[1] << 8 | data[2]; jh->high = data[3] << 8 | data[4]; } // printf ("\n*** %x: bits= %d; high= %d; wide= %d; clrs= %d", // tag, jh->bits, jh->high, jh->wide, jh->clrs); if (len == 9 && !dng_version) getc(ifp); break; case 0xffc4: // define Huffman tables if (info_only) break; for (dp = data; dp < data+len && (c = *dp++) < 4; ) 
jh->free[c] = jh->huff[c] = make_decoder_ref (&dp); break; case 0xffda: // start of scan jh->psv = data[1+data[0]*2]; jh->bits -= data[3+data[0]*2] & 15; break; case 0xffdd: // define restart interval jh->restart = data[0] << 8 | data[1]; } } while (tag != 0xffda); // printf ("\n"); if (info_only) return 1; if (jh->clrs > 6 || !jh->huff[0]) return 0; FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c]; if (jh->sraw) { FORC(4) jh->huff[2+c] = jh->huff[1]; FORC(jh->sraw) jh->huff[1+c] = jh->huff[0]; } jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4); merror (jh->row, "ljpeg_start()"); return zero_after_ff = 1; } void CLASS ljpeg_end (struct jhead *jh) { int c; FORC4 if (jh->free[c]) free (jh->free[c]); free (jh->row); } int CLASS ljpeg_diff (ushort *huff) { int len, diff; if(!huff) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif len = gethuff(huff); if (len == 16 && (!dng_version || dng_version >= 0x1010000)) return -32768; diff = getbits(len); if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - 1; return diff; } ushort * CLASS ljpeg_row (int jrow, struct jhead *jh) { int col, c, diff, pred, spred=0; ushort mark=0, *row[3]; if (jrow * jh->wide % jh->restart == 0) { FORC(6) jh->vpred[c] = 1 << (jh->bits-1); if (jrow) { fseek (ifp, -2, SEEK_CUR); do mark = (mark << 8) + (c = fgetc(ifp)); while (c != EOF && mark >> 4 != 0xffd); } getbits(-1); } FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1); for (col=0; col < jh->wide; col++) FORC(jh->clrs) { diff = ljpeg_diff (jh->huff[c]); if (jh->sraw && c <= jh->sraw && (col | c)) pred = spred; else if (col) pred = row[0][-jh->clrs]; else pred = (jh->vpred[c] += diff) - diff; if (jrow && col) switch (jh->psv) { case 1: break; case 2: pred = row[1][0]; break; case 3: pred = row[1][-jh->clrs]; break; case 4: pred = pred + row[1][0] - row[1][-jh->clrs]; break; case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1); break; case 6: pred = row[1][0] + ((pred - 
row[1][-jh->clrs]) >> 1); break; case 7: pred = (pred + row[1][0]) >> 1; break; default: pred = 0; } if ((**row = pred + diff) >> jh->bits) derror(); if (c <= jh->sraw) spred = **row; row[0]++; row[1]++; } return row[2]; } void CLASS lossless_jpeg_load_raw() { int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0; struct jhead jh; ushort *rp; // printf ("\n*** lossless_jpeg_load_raw\n"); if (!ljpeg_start (&jh, 0)) return; if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 2); #endif jwide = jh.wide * jh.clrs; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); if (load_flags & 1) row = jrow & 1 ? height-1-jrow/2 : jrow/2; for (jcol=0; jcol < jwide; jcol++) { val = curve[*rp++]; if (cr2_slice[0]) { jidx = jrow*jwide + jcol; i = jidx / (cr2_slice[1]*jh.high); if ((j = i >= cr2_slice[0])) i = cr2_slice[0]; jidx -= i * (cr2_slice[1]*jh.high); row = jidx / cr2_slice[1+j]; col = jidx % cr2_slice[1+j] + i*cr2_slice[1]; } if (raw_width == 3984 && (col -= 2) < 0) col += (row--,raw_width); if(row>raw_height) #ifdef LIBRAW_LIBRARY_BUILD throw LIBRAW_EXCEPTION_IO_CORRUPT; #else longjmp (failure, 3); #endif if ((unsigned) row < raw_height) RAW(row,col) = val; if (++col >= raw_width) col = (row++,0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ ljpeg_end (&jh); throw; } #endif ljpeg_end (&jh); } void CLASS canon_sraw_load_raw() { struct jhead jh; short *rp=0, (*ip)[4]; int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c; int v[3]={0,0,0}, ver, hue; char *cp; if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return; jwide = (jh.wide >>= 1) * jh.clrs; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (ecol=slice=0; slice <= cr2_slice[0]; slice++) { scol = ecol; ecol += cr2_slice[1] * 2 / jh.clrs; if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2; for (row=0; row < height; row += (jh.clrs >> 1) - 1) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif ip = (short (*)[4]) image + row*width; for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) { if ((jcol %= jwide) == 0) rp = (short *) ljpeg_row (jrow++, &jh); if (col >= width) continue; #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { FORC (jh.clrs-2) { ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192; } ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else if(imgdata.params.sraw_ycc) { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 8192; ip[col][2] = rp[jcol+jh.clrs-1] - 8192; } else #endif { FORC (jh.clrs-2) ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c]; ip[col][1] = rp[jcol+jh.clrs-2] - 16384; ip[col][2] = rp[jcol+jh.clrs-1] - 16384; } } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...)
// NOTE(review): this chunk is whitespace-collapsed; each '#ifdef'/'#endif'/'try'/'checkCancel'
// fragment above must sit on its own physical line for the preprocessor to accept it.
// canon_sraw_load_raw() (above): lossless-JPEG-decodes Canon sRAW/mRAW slices (cr2_slice[]),
// de-interleaving luma into image[] and storing chroma biased by 8192 (LibRaw sraw_ycc modes)
// or 16384 (default path). Exceptions unwind through ljpeg_end() so jh is always released.
{ ljpeg_end (&jh); throw ; } #endif #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sraw_ycc>=2) { ljpeg_end (&jh); maximum = 0x3fff; return; } #endif #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (cp=model2; *cp && !isdigit(*cp); cp++); sscanf (cp, "%d.%d.%d", v, v+1, v+2); ver = (v[0]*1000 + v[1])*1000 + v[2]; hue = (jh.sraw+1) << 2; if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006)) hue = jh.sraw << 1; ip = (short (*)[4]) image; rp = ip[0]; for (row=0; row < height; row++, ip+=width) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (row & (jh.sraw >> 1)) for (col=0; col < width; col+=2) for (c=1; c < 3; c++) if (row == height-1) ip[col][c] = ip[col-width][c]; else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1; for (col=1; col < width; col+=2) for (c=1; c < 3; c++) if (col == width-1) ip[col][c] = ip[col-1][c]; else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1; } #ifdef LIBRAW_LIBRARY_BUILD if(!imgdata.params.sraw_ycc) #endif for ( ; rp < ip[0]; rp+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (unique_id == 0x80000218 || unique_id == 0x80000250 || unique_id == 0x80000261 || unique_id == 0x80000281 || unique_id == 0x80000287) { rp[1] = (rp[1] << 2) + hue; rp[2] = (rp[2] << 2) + hue; pix[0] = rp[0] + (( 50*rp[1] + 22929*rp[2]) >> 14); pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14); pix[2] = rp[0] + ((29040*rp[1] - 101*rp[2]) >> 14); } else { if (unique_id < 0x80000218) rp[0] -= 512; pix[0] = rp[0] + rp[2]; pix[2] = rp[0] + rp[1]; pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12); } FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...)
// Above: firmware version parsed out of model2 selects the chroma "hue" bias per camera
// unique_id; subsampled chroma rows/columns are bilinearly filled, then fixed-point YCC->RGB
// matrices (model-dependent) and sraw_mul[] white-balance scaling produce the final samples.
// Below: cleanup for that second try block, then adobe_copy_pixel() — routes one DNG sample
// into raw_image (via curve[]) or, for non-raw output, into image[]; honors shot_select for
// dual-shot files — and lossless_dng_load_raw(), which walks JPEG tiles (tile_width/length),
// plus the start of packed_dng_load_raw() (its tail is in the next block).
{ ljpeg_end (&jh); throw ; } #endif ljpeg_end (&jh); maximum = 0x3fff; } void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp) { int c; if (is_raw == 2 && shot_select) (*rp)++; if (raw_image) { if (row < raw_height && col < raw_width) RAW(row,col) = curve[**rp]; *rp += is_raw; } else { if (row < height && col < width) FORC(tiff_samples) image[row*width+col][c] = curve[(*rp)[c]]; *rp += tiff_samples; } if (is_raw == 2 && shot_select) (*rp)--; } void CLASS lossless_dng_load_raw() { unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col; struct jhead jh; ushort *rp; while (trow < raw_height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif save = ftell(ifp); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); if (!ljpeg_start (&jh, 0)) break; jwide = jh.wide; if (filters) jwide *= jh.clrs; jwide /= is_raw; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=col=jrow=0; jrow < jh.high; jrow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif rp = ljpeg_row (jrow, &jh); for (jcol=0; jcol < jwide; jcol++) { adobe_copy_pixel (trow+row, tcol+col, &rp); if (++col >= tile_width || col >= raw_width) row += 1 + (col = 0); } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { ljpeg_end (&jh); throw ; } #endif fseek (ifp, save+4, SEEK_SET); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); ljpeg_end (&jh); } } void CLASS packed_dng_load_raw() { ushort *pixel, *rp; int row, col; pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel); merror (pixel, "packed_dng_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (tiff_bps == 16) read_shorts (pixel, raw_width * tiff_samples); else { getbits(-1); for (col=0; col < raw_width * tiff_samples; col++) pixel[col] = getbits(tiff_bps); } for (rp=pixel, col=0; col < raw_width; col++) adobe_copy_pixel (row, col, &rp); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...)
// packed_dng_load_raw() cleanup below (row buffer freed on both normal and exception paths);
// then pentax_load_raw() — builds a PEF huffman table from bit-length metadata and decodes
// row-wise with vertical/horizontal predictors — nikon_coolscan_load_raw() (LibRaw-only RGB
// scanner path with a gamma LUT sized by tiff_bps), and the nikon_tree[] tables that start
// nikon_load_raw(). NOTE(review): '#ifdef' fragments inline here imply collapsed newlines.
{ free (pixel); throw ; } #endif free (pixel); } void CLASS pentax_load_raw() { ushort bit[2][15], huff[4097]; int dep, row, col, diff, c, i; ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2]; fseek (ifp, meta_offset, SEEK_SET); dep = (get2() + 12) & 15; fseek (ifp, 12, SEEK_CUR); FORC(dep) bit[0][c] = get2(); FORC(dep) bit[1][c] = fgetc(ifp); FORC(dep) for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); ) huff[++i] = bit[1][c] << 8 | c; huff[0] = 12; fseek (ifp, data_offset, SEEK_SET); getbits(-1); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { diff = ljpeg_diff (huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; RAW(row,col) = hpred[col & 1]; if (hpred[col & 1] >> tiff_bps) derror(); } } } #ifdef LIBRAW_LIBRARY_BUILD void CLASS nikon_coolscan_load_raw() { int bufsize = width*3*tiff_bps/8; if(tiff_bps <= 8) gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,255); else gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,65535); fseek (ifp, data_offset, SEEK_SET); unsigned char *buf = (unsigned char*)malloc(bufsize); unsigned short *ubuf = (unsigned short *)buf; for(int row = 0; row < raw_height; row++) { int red = fread (buf, 1, bufsize, ifp); unsigned short (*ip)[4] = (unsigned short (*)[4]) image + row*width; if(tiff_bps <= 8) for(int col=0; col<width;col++) { ip[col][0] = curve[buf[col*3]]; ip[col][1] = curve[buf[col*3+1]]; ip[col][2] = curve[buf[col*3+2]]; ip[col][3]=0; } else for(int col=0; col<width;col++) { ip[col][0] = curve[ubuf[col*3]]; ip[col][1] = curve[ubuf[col*3+1]]; ip[col][2] = curve[ubuf[col*3+2]]; ip[col][3]=0; } } free(buf); } #endif void CLASS nikon_load_raw() { static const uchar nikon_tree[][32] = { { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy */ 5,4,3,6,2,7,1,0,8,9,11,10,12 }, { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0, /* 12-bit lossy after split */ 0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 }, {
// (cont.) remaining nikon_tree[] huffman descriptors (12/14-bit, lossy/lossless variants);
// tree index is chosen from the NEF compression header version bytes and tiff_bps below.
0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0, /* 12-bit lossless */ 5,4,6,3,7,2,8,1,9,0,10,11,12 }, { 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0, /* 14-bit lossy */ 5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 }, { 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0, /* 14-bit lossy after split */ 8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 }, { 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0, /* 14-bit lossless */ 7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } }; ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize; int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff; fseek (ifp, meta_offset, SEEK_SET); ver0 = fgetc(ifp); ver1 = fgetc(ifp); if (ver0 == 0x49 || ver1 == 0x58) fseek (ifp, 2110, SEEK_CUR); if (ver0 == 0x46) tree = 2; if (tiff_bps == 14) tree += 3; read_shorts (vpred[0], 4); max = 1 << tiff_bps & 0x7fff; if ((csize = get2()) > 1) step = max / (csize-1); if (ver0 == 0x44 && ver1 == 0x20 && step > 0) { for (i=0; i < csize; i++) curve[i*step] = get2(); for (i=0; i < max; i++) curve[i] = ( curve[i-i%step]*(step-i%step) + curve[i-i%step+step]*(i%step) ) / step; fseek (ifp, meta_offset+562, SEEK_SET); split = get2(); } else if (ver0 != 0x46 && csize <= 0x4001) read_shorts (curve, max=csize); while (curve[max-2] == curve[max-1]) max--; huff = make_decoder (nikon_tree[tree]); fseek (ifp, data_offset, SEEK_SET); getbits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (min=row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (split && row == split) { free (huff); huff = make_decoder (nikon_tree[tree+1]); max += (min = 16) << 1; } for (col=0; col < raw_width; col++) { i = gethuff(huff); len = i & 15; shl = i >> 4; diff = ((getbits(len-shl) << 1) + 1) << shl >> 1; if ((diff & (1 << (len-1))) == 0) diff -= (1 << len) - !shl; if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; if ((ushort)(hpred[col & 1] + min) >= max) derror(); RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...)
// Above: NEF predictor decode — linearization curve interpolated from sampled points, decoder
// table swapped at the 'split' row for lossy-after-split files, range-checked via derror().
// Below: nikon_yuv_load_raw() (packed 12-bit YUV, fixed-point-ish float YCC->RGB), then the
// heuristic camera sniffers nikon_e995/e2100/nikon_3700 and the thumbnail writers; the
// minolta_z2 description comment at the very end continues into the next physical line.
{ free (huff); throw; } #endif free (huff); } void CLASS nikon_yuv_load_raw() { int row, col, yuv[4], rgb[3], b, c; UINT64 bitbuf=0; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { if (!(b = col & 1)) { bitbuf = 0; FORC(6) bitbuf |= (UINT64) fgetc(ifp) << c*8; FORC(4) yuv[c] = (bitbuf >> c*12 & 0xfff) - (c >> 1 << 11); } rgb[0] = yuv[b] + 1.370705*yuv[3]; rgb[1] = yuv[b] - 0.337633*yuv[2] - 0.698001*yuv[3]; rgb[2] = yuv[b] + 1.732446*yuv[2]; FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,0xfff)] / cam_mul[c]; } } } /* Returns 1 for a Coolpix 995, 0 for anything else. */ int CLASS nikon_e995() { int i, histo[256]; const uchar often[] = { 0x00, 0x55, 0xaa, 0xff }; memset (histo, 0, sizeof histo); fseek (ifp, -2000, SEEK_END); for (i=0; i < 2000; i++) histo[fgetc(ifp)]++; for (i=0; i < 4; i++) if (histo[often[i]] < 200) return 0; return 1; } /* Returns 1 for a Coolpix 2100, 0 for anything else. */ int CLASS nikon_e2100() { uchar t[12]; int i; fseek (ifp, 0, SEEK_SET); for (i=0; i < 1024; i++) { fread (t, 1, 12, ifp); if (((t[2] & t[4] & t[7] & t[9]) >> 4 & t[1] & t[6] & t[8] & t[11] & 3) != 3) return 0; } return 1; } void CLASS nikon_3700() { int bits, i; uchar dp[24]; static const struct { int bits; char t_make[12], t_model[15]; } table[] = { { 0x00, "Pentax", "Optio 33WR" }, { 0x03, "Nikon", "E3200" }, { 0x32, "Nikon", "E3700" }, { 0x33, "Olympus", "C740UZ" } }; fseek (ifp, 3072, SEEK_SET); fread (dp, 1, 24, ifp); bits = (dp[8] & 3) << 4 | (dp[20] & 3); for (i=0; i < sizeof table / sizeof *table; i++) if (bits == table[i].bits) { strcpy (make, table[i].t_make ); strcpy (model, table[i].t_model); } } /* Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/ int CLASS minolta_z2() { int i, nz; char tail[424]; fseek (ifp, -sizeof tail, SEEK_END); fread (tail, 1, sizeof tail, ifp); for (nz=i=0; i < sizeof tail; i++) if (tail[i]) nz++; return nz > 20; } //@end COMMON void CLASS jpeg_thumb(); //@out COMMON void CLASS ppm_thumb() { char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) malloc (thumb_length); merror (thumb, "ppm_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fread (thumb, 1, thumb_length, ifp); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS ppm16_thumb() { int i; char *thumb; thumb_length = thumb_width*thumb_height*3; thumb = (char *) calloc (thumb_length, 2); merror (thumb, "ppm16_thumb()"); read_shorts ((ushort *) thumb, thumb_length); for (i=0; i < thumb_length; i++) thumb[i] = ((ushort *) thumb)[i] >> 8; fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); fwrite (thumb, 1, thumb_length, ofp); free (thumb); } void CLASS layer_thumb() { int i, c; char *thumb, map[][4] = { "012","102" }; colors = thumb_misc >> 5 & 7; thumb_length = thumb_width*thumb_height; thumb = (char *) calloc (colors, thumb_length); merror (thumb, "layer_thumb()"); fprintf (ofp, "P%d\n%d %d\n255\n", 5 + (colors >> 1), thumb_width, thumb_height); fread (thumb, thumb_length, colors, ifp); for (i=0; i < thumb_length; i++) FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp); free (thumb); } void CLASS rollei_thumb() { unsigned i; ushort *thumb; thumb_length = thumb_width * thumb_height; thumb = (ushort *) calloc (thumb_length, 2); merror (thumb, "rollei_thumb()"); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); read_shorts (thumb, thumb_length); for (i=0; i < thumb_length; i++) { putc (thumb[i] << 3, ofp); putc (thumb[i] >> 5 << 2, ofp); putc (thumb[i] >> 11 << 3, ofp); } free (thumb); } void CLASS rollei_load_raw() { uchar pixel[10]; unsigned iten=0, isix, i, buffer=0, todo[16]; isix = raw_width * raw_height * 5 / 8; while (fread
(pixel, 1, 10, ifp) == 10) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (i=0; i < 10; i+=2) { todo[i] = iten++; todo[i+1] = pixel[i] << 8 | pixel[i+1]; buffer = pixel[i] >> 2 | buffer << 6; } for ( ; i < 16; i+=2) { todo[i] = isix++; todo[i+1] = buffer >> (14-i)*5; } for (i=0; i < 16; i+=2) raw_image[todo[i]] = (todo[i+1] & 0x3ff); } maximum = 0x3ff; } int CLASS raw (unsigned row, unsigned col) { return (row < raw_height && col < raw_width) ? RAW(row,col) : 0; } void CLASS phase_one_flat_field (int is_float, int nc) { ushort head[8]; unsigned wide, high, y, x, c, rend, cend, row, col; float *mrow, num, mult[4]; read_shorts (head, 8); if (head[2] * head[3] * head[4] * head[5] == 0) return; wide = head[2] / head[4] + (head[2] % head[4] != 0); high = head[3] / head[5] + (head[3] % head[5] != 0); mrow = (float *) calloc (nc*wide, sizeof *mrow); merror (mrow, "phase_one_flat_field()"); for (y=0; y < high; y++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) { num = is_float ? getreal(11) : get2()/32768.0; if (y==0) mrow[c*wide+x] = num; else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5]; } if (y==0) continue; rend = head[1] + y*head[5]; for (row = rend-head[5]; row < raw_height && row < rend && row < head[1]+head[3]-head[5]; row++) { for (x=1; x < wide; x++) { for (c=0; c < nc; c+=2) { mult[c] = mrow[c*wide+x-1]; mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4]; } cend = head[0] + x*head[4]; for (col = cend-head[4]; col < raw_width && col < cend && col < head[0]+head[2]-head[4]; col++) { c = nc > 2 ?
// (cont.) phase_one_flat_field(): bilinear interpolation of a coarse gain grid (head[] gives
// origin/size/cell pitch); multiplier applied per CFA color, result clamped to 16 bits.
// raw() above is the bounds-checked RAW() accessor used by the defect-repair code below.
FC(row-top_margin,col-left_margin) : 0; if (!(c & 1)) { c = RAW(row,col) * mult[c]; RAW(row,col) = LIM(c,0,65535); } for (c=0; c < nc; c+=2) mult[c] += mult[c+1]; } } for (x=0; x < wide; x++) for (c=0; c < nc; c+=2) mrow[c*wide+x] += mrow[(c+1)*wide+x]; } } free (mrow); } int CLASS phase_one_correct() { unsigned entries, tag, data, save, col, row, type; int len, i, j, k, cip, val[4], dev[4], sum, max; int head[9], diff, mindiff=INT_MAX, off_412=0; static const signed char dir[12][2] = { {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0}, {-2,-2}, {-2,2}, {2,-2}, {2,2} }; float poly[8], num, cfrac, frac, mult[2], *yval[2]; ushort *xval[2]; int qmult_applied = 0, qlin_applied = 0; if (half_size || !meta_length) return 0; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Phase One correction...\n")); #endif fseek (ifp, meta_offset, SEEK_SET); order = get2(); fseek (ifp, 6, SEEK_CUR); fseek (ifp, meta_offset+get4(), SEEK_SET); entries = get4(); get4(); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (entries--) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif tag = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, meta_offset+data, SEEK_SET); if (tag == 0x419) { /* Polynomial curve */ for (get4(), i=0; i < 8; i++) poly[i] = getreal(11); poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1; for (i=0; i < 0x10000; i++) { num = (poly[5]*i + poly[3])*i + poly[1]; curve[i] = LIM(num,0,65535); } goto apply; /* apply to right half */ } else if (tag == 0x41a) { /* Polynomial curve */ for (i=0; i < 4; i++) poly[i] = getreal(11); for (i=0; i < 0x10000; i++) { for (num=0, j=4; j--; ) num = num * i + poly[j]; curve[i] = LIM(num+i,0,65535); } apply: /* apply to whole image */ for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col = (tag & 1)*ph1.split_col; col < raw_width; col++) RAW(row,col) = curve[RAW(row,col)]; } } else if (tag == 0x400) { /* Sensor defects */ while ((len -= 8) >= 0) { col = get2(); row =
// (cont.) phase_one_correct(): walks the meta-data tag list; tag 0x400 repairs bad
// columns/pixels from the dir[] neighbor offsets (median-like rejection of the worst of
// four neighbors for green sites, weighted blend otherwise).
get2(); type = get2(); get2(); if (col >= raw_width) continue; if (type == 131 || type == 137) /* Bad column */ for (row=0; row < raw_height; row++) if (FC(row-top_margin,col-left_margin) == 1) { for (sum=i=0; i < 4; i++) sum += val[i] = raw (row+dir[i][0], col+dir[i][1]); for (max=i=0; i < 4; i++) { dev[i] = abs((val[i] << 2) - sum); if (dev[max] < dev[i]) max = i; } RAW(row,col) = (sum - val[max])/3.0 + 0.5; } else { for (sum=0, i=8; i < 12; i++) sum += raw (row+dir[i][0], col+dir[i][1]); RAW(row,col) = 0.5 + sum * 0.0732233 + (raw(row,col-2) + raw(row,col+2)) * 0.3535534; } else if (type == 129) { /* Bad pixel */ if (row >= raw_height) continue; j = (FC(row-top_margin,col-left_margin) != 1) * 4; for (sum=0, i=j; i < j+8; i++) sum += raw (row+dir[i][0], col+dir[i][1]); RAW(row,col) = (sum + 4) >> 3; } } } else if (tag == 0x401) { /* All-color flat fields */ phase_one_flat_field (1, 2); } else if (tag == 0x416 || tag == 0x410) { phase_one_flat_field (0, 2); } else if (tag == 0x40b) { /* Red+blue flat field */ phase_one_flat_field (0, 4); } else if (tag == 0x412) { fseek (ifp, 36, SEEK_CUR); diff = abs (get2() - ph1.tag_21a); if (mindiff > diff) { mindiff = diff; off_412 = ftell(ifp) - 38; } } else if (tag == 0x41f && !qlin_applied) { /* Quadrant linearization */ ushort lc[2][2][16], ref[16]; int qr, qc; for (qr = 0; qr < 2; qr++) for (qc = 0; qc < 2; qc++) for (i = 0; i < 16; i++) lc[qr][qc][i] = (ushort)get4(); for (i = 0; i < 16; i++) { int v = 0; for (qr = 0; qr < 2; qr++) for (qc = 0; qc < 2; qc++) v += lc[qr][qc][i]; ref[i] = (v + 2) >> 2; } for (qr = 0; qr < 2; qr++) { for (qc = 0; qc < 2; qc++) { int cx[19], cf[19]; for (i = 0; i < 16; i++) { cx[1+i] = lc[qr][qc][i]; cf[1+i] = ref[i]; } cx[0] = cf[0] = 0; cx[17] = cf[17] = ((unsigned int)ref[15] * 65535) / lc[qr][qc][15]; cf[18] = cx[18] = 65535; cubic_spline(cx, cf, 19); for (row = (qr ? ph1.split_row : 0); row < (qr ?
raw_height : ph1.split_row); row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col = (qc ? ph1.split_col : 0); col < (qc ? raw_width : ph1.split_col); col++) RAW(row,col) = curve[RAW(row,col)]; } } } qlin_applied = 1; } else if (tag == 0x41e && !qmult_applied) { /* Quadrant multipliers */ float qmult[2][2] = { { 1, 1 }, { 1, 1 } }; get4(); get4(); get4(); get4(); qmult[0][0] = 1.0 + getreal(11); get4(); get4(); get4(); get4(); get4(); qmult[0][1] = 1.0 + getreal(11); get4(); get4(); get4(); qmult[1][0] = 1.0 + getreal(11); get4(); get4(); get4(); qmult[1][1] = 1.0 + getreal(11); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row,col); RAW(row,col) = LIM(i,0,65535); } } qmult_applied = 1; } else if (tag == 0x431 && !qmult_applied) { /* Quadrant combined */ ushort lc[2][2][7], ref[7]; int qr, qc; for (i = 0; i < 7; i++) ref[i] = (ushort)get4(); for (qr = 0; qr < 2; qr++) for (qc = 0; qc < 2; qc++) for (i = 0; i < 7; i++) lc[qr][qc][i] = (ushort)get4(); for (qr = 0; qr < 2; qr++) { for (qc = 0; qc < 2; qc++) { int cx[9], cf[9]; for (i = 0; i < 7; i++) { cx[1+i] = ref[i]; cf[1+i] = ((unsigned int)ref[i] * lc[qr][qc][i]) / 10000; } cx[0] = cf[0] = 0; cx[8] = cf[8] = 65535; cubic_spline(cx, cf, 9); for (row = (qr ? ph1.split_row : 0); row < (qr ? raw_height : ph1.split_row); row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col = (qc ? ph1.split_col : 0); col < (qc ?
// (cont.) phase_one_correct() tail: per-quadrant spline/multiplier corrections keyed on
// ph1.split_row/split_col, then the optional tag-0x412 luma-gain table (head[]/xval/yval
// interpolation). NOTE(review): on the cancellation path this function returns
// LIBRAW_CANCELLED_BY_CALLBACK, but the normal path falls off the end of a non-void
// function — callers must not rely on that return value; worth confirming upstream.
raw_width : ph1.split_col); col++) RAW(row,col) = curve[RAW(row,col)]; } } } qmult_applied = 1; qlin_applied = 1; } fseek (ifp, save, SEEK_SET); } if (off_412) { fseek (ifp, off_412, SEEK_SET); for (i=0; i < 9; i++) head[i] = get4() & 0x7fff; yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6); merror (yval[0], "phase_one_correct()"); yval[1] = (float *) (yval[0] + head[1]*head[3]); xval[0] = (ushort *) (yval[1] + head[2]*head[4]); xval[1] = (ushort *) (xval[0] + head[1]*head[3]); get2(); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) yval[i][j] = getreal(11); for (i=0; i < 2; i++) for (j=0; j < head[i+1]*head[i+3]; j++) xval[i][j] = get2(); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { cfrac = (float) col * head[3] / raw_width; cfrac -= cip = cfrac; num = RAW(row,col) * 0.5; for (i=cip; i < cip+2; i++) { for (k=j=0; j < head[1]; j++) if (num < xval[0][k = head[1]*i+j]) break; frac = (j == 0 || j == head[1]) ? 0 : (xval[0][k] - num) / (xval[0][k] - xval[0][k-1]); mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac); } i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2; RAW(row,col) = LIM(i,0,65535); } } free (yval[0]); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { return LIBRAW_CANCELLED_BY_CALLBACK; } #endif } void CLASS phase_one_load_raw() { int a, b, i; ushort akey, bkey, t_mask; fseek (ifp, ph1.key_off, SEEK_SET); akey = get2(); bkey = get2(); t_mask = ph1.format == 1 ?
0x5555:0x1354; #ifdef LIBRAW_LIBRARY_BUILD if (ph1.black_col || ph1.black_row ) { imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort)); merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw()"); imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort)); merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw()"); if (ph1.black_col) { fseek (ifp, ph1.black_col, SEEK_SET); read_shorts ((ushort *)imgdata.rawdata.ph1_cblack[0], raw_height*2); } if (ph1.black_row) { fseek (ifp, ph1.black_row, SEEK_SET); read_shorts ((ushort *) imgdata.rawdata.ph1_rblack[0], raw_width*2); } } #endif fseek (ifp, data_offset, SEEK_SET); read_shorts (raw_image, raw_width*raw_height); if (ph1.format) for (i=0; i < raw_width*raw_height; i+=2) { a = raw_image[i+0] ^ akey; b = raw_image[i+1] ^ bkey; raw_image[i+0] = (a & t_mask) | (b & ~t_mask); raw_image[i+1] = (b & t_mask) | (a & ~t_mask); } } unsigned CLASS ph1_bithuff (int nbits, ushort *huff) { #ifndef LIBRAW_NOTHREADS #define bitbuf tls->ph1_bits.bitbuf #define vbits tls->ph1_bits.vbits #else static UINT64 bitbuf=0; static int vbits=0; #endif unsigned c; if (nbits == -1) return bitbuf = vbits = 0; if (nbits == 0) return 0; if (vbits < nbits) { bitbuf = bitbuf << 32 | get4(); vbits += 32; } c = bitbuf << (64-vbits) >> (64-nbits); if (huff) { vbits -= huff[c] >> 8; return (uchar) huff[c]; } vbits -= nbits; return c; #ifndef LIBRAW_NOTHREADS #undef bitbuf #undef vbits #endif } #define ph1_bits(n) ph1_bithuff(n,0) #define ph1_huff(h) ph1_bithuff(*h,h+1) void CLASS phase_one_load_raw_c() { static const int length[] = { 8,7,6,9,11,10,5,12,14,13 }; int *offset, len[2], pred[2], row, col, i, j; ushort *pixel; short (*c_black)[2], (*r_black)[2]; #ifdef LIBRAW_LIBRARY_BUILD if(ph1.format == 6) throw LIBRAW_EXCEPTION_IO_CORRUPT; #endif pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2); merror (pixel, "phase_one_load_raw_c()"); offset = (int *) (pixel + raw_width); fseek (ifp, strip_offset, SEEK_SET);
// Above: phase_one_load_raw() XOR-descrambles keyed uncompressed data (akey/bkey with a
// format-dependent interleave mask) and, in LibRaw builds, snapshots column/row black
// reference strips; ph1_bithuff() is the shared 64-bit bit-reader (thread-local state via
// the tls macros when LIBRAW_NOTHREADS is not set). Below: phase_one_load_raw_c() — the
// compressed IIQ path: per-row strip offsets, per-pair code-length selection from length[],
// 14-bit escape codes, and (format 5) a curve[] remap of small values.
for (row=0; row < raw_height; row++) offset[row] = get4(); c_black = (short (*)[2]) (offset + raw_height); fseek (ifp, ph1.black_col, SEEK_SET); if (ph1.black_col) read_shorts ((ushort *) c_black[0], raw_height*2); r_black = c_black + raw_height; fseek (ifp, ph1.black_row, SEEK_SET); if (ph1.black_row) read_shorts ((ushort *) r_black[0], raw_width*2); #ifdef LIBRAW_LIBRARY_BUILD // Copy data to internal copy (ever if not read) if (ph1.black_col || ph1.black_row ) { imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort)); merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw_c()"); memmove(imgdata.rawdata.ph1_cblack,(ushort*)c_black[0],raw_height*2*sizeof(ushort)); imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort)); merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw_c()"); memmove(imgdata.rawdata.ph1_rblack,(ushort*)r_black[0],raw_width*2*sizeof(ushort)); } #endif for (i=0; i < 256; i++) curve[i] = i*i / 3.969 + 0.5; #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, data_offset + offset[row], SEEK_SET); ph1_bits(-1); pred[0] = pred[1] = 0; for (col=0; col < raw_width; col++) { if (col >= (raw_width & -8)) len[0] = len[1] = 14; else if ((col & 7) == 0) for (i=0; i < 2; i++) { for (j=0; j < 5 && !ph1_bits(1); j++); if (j--) len[i] = length[j*2 + ph1_bits(1)]; } if ((i = len[col & 1]) == 14) pixel[col] = pred[col & 1] = ph1_bits(16); else pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1)); if (pred[col & 1] >> 16) derror(); if (ph1.format == 5 && pixel[col] < 256) pixel[col] = curve[pixel[col]]; } for (col=0; col < raw_width; col++) { #ifndef LIBRAW_LIBRARY_BUILD i = (pixel[col] << 2) - ph1.t_black + c_black[row][col >= ph1.split_col] + r_black[col][row >= ph1.split_row]; if (i > 0) RAW(row,col) = i; #else RAW(row,col) = pixel[col] << 2; #endif } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...)
// Below: cleanup for phase_one_load_raw_c(), then hasselblad_load_raw() — ljpeg-headered
// 3FR decode with little-endian bit order forced (order = 0x4949), a rolling three-row
// back[] predictor workspace, and multi-sample (tiff_samples) interleaving; continues into
// the next block.
{ free (pixel); throw; } #endif free (pixel); maximum = 0xfffc - ph1.t_black; } void CLASS hasselblad_load_raw() { struct jhead jh; int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c; unsigned upix, urow, ucol; ushort *ip; if (!ljpeg_start (&jh, 0)) return; order = 0x4949; ph1_bits(-1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif back[4] = (int *) calloc (raw_width, 3*sizeof **back); merror (back[4], "hasselblad_load_raw()"); FORC3 back[c] = back[4] + c*raw_width; cblack[6] >>= sh = tiff_samples > 1; shot = LIM(shot_select, 1, tiff_samples) - 1; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif FORC4 back[(c+3) & 3] = back[c]; for (col=0; col < raw_width; col+=2) { for (s=0; s < tiff_samples*2; s+=2) { FORC(2) len[c] = ph1_huff(jh.huff[0]); FORC(2) { diff[s+c] = ph1_bits(len[c]); if ((diff[s+c] & (1 << (len[c]-1))) == 0) diff[s+c] -= (1 << len[c]) - 1; if (diff[s+c] == 65535) diff[s+c] = -32768; } } for (s=col; s < col+2; s++) { pred = 0x8000 + load_flags; if (col) pred = back[2][s-2]; if (col && row > 1) switch (jh.psv) { case 11: pred += back[0][s]/2 - back[0][s-2]/2; break; } f = (row & 1)*3 ^ ((col+s) & 1); FORC (tiff_samples) { pred += diff[(s & 1)*tiff_samples+c]; upix = pred >> sh & 0xffff; if (raw_image && c == shot) RAW(row,s) = upix; if (image) { urow = row-top_margin + (c & 1); ucol = col-left_margin - ((c >> 1) & 1); ip = &image[urow*width+ucol][f]; if (urow < height && ucol < width) *ip = c < 4 ?
upix : (*ip + upix) >> 1; } } back[2][s] = pred; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ free (back[4]); ljpeg_end (&jh); throw; } #endif free (back[4]); ljpeg_end (&jh); if (image) mix_green = 1; } void CLASS leaf_hdr_load_raw() { ushort *pixel=0; unsigned tile=0, r, c, row, col; if (!filters) { pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "leaf_hdr_load_raw()"); } #ifdef LIBRAW_LIBRARY_BUILD try { #endif FORC(tiff_samples) for (r=0; r < raw_height; r++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (r % tile_length == 0) { fseek (ifp, data_offset + 4*tile++, SEEK_SET); fseek (ifp, get4(), SEEK_SET); } if (filters && c != shot_select) continue; if (filters) pixel = raw_image + r*raw_width; read_shorts (pixel, raw_width); if (!filters && (row = r - top_margin) < height) for (col=0; col < width; col++) image[row*width+col][c] = pixel[col+left_margin]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { if(!filters) free(pixel); throw; } #endif if (!filters) { maximum = 0xffff; raw_color = 1; free (pixel); } } void CLASS unpacked_load_raw() { int row, col, bits=0; while (1 << ++bits < maximum); read_shorts (raw_image, raw_width*raw_height); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) if ((RAW(row,col) >>= load_flags) >> bits && (unsigned) (row-top_margin) < height && (unsigned) (col-left_margin) < width) derror(); } } void CLASS sinar_4shot_load_raw() { ushort *pixel; unsigned shot, row, col, r, c; if (raw_image) { shot = LIM (shot_select, 1, 4) - 1; fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); unpacked_load_raw(); return; } pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "sinar_4shot_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (shot=0; shot < 4; shot++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, data_offset + shot*4, SEEK_SET); fseek (ifp, get4(), SEEK_SET); for
// Above: hasselblad tail plus leaf_hdr_load_raw() (tiled 16-bit planes, per-sample
// shot_select filtering), unpacked_load_raw() (plain shorts, range check via derror),
// and sinar_4shot_load_raw() start — merges 4 pixel-shifted shots into image[] unless a
// single shot is selected. Below: imacon_full_load_raw() (row-buffered RGB in LibRaw
// builds) and packed_load_raw() — the generic bit-packed reader driven by load_flags
// (row interleave, byte bite size, column swap, per-10-column pad byte).
(row=0; row < raw_height; row++) { read_shorts (pixel, raw_width); if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue; for (col=0; col < raw_width; col++) { if ((c = col-left_margin - (shot & 1)) >= width) continue; image[r*width+c][(row & 1)*3 ^ (~col & 1)] = pixel[col]; } } } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { free(pixel); throw; } #endif free (pixel); mix_green = 1; } void CLASS imacon_full_load_raw() { int row, col; if (!image) return; #ifdef LIBRAW_LIBRARY_BUILD unsigned short *buf = (unsigned short *)malloc(width*3*sizeof(unsigned short)); merror(buf,"imacon_full_load_raw"); #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); read_shorts(buf,width*3); unsigned short (*rowp)[4] = &image[row*width]; for (col=0; col < width; col++) { rowp[col][0]=buf[col*3]; rowp[col][1]=buf[col*3+1]; rowp[col][2]=buf[col*3+2]; rowp[col][3]=0; } #else for (col=0; col < width; col++) read_shorts (image[row*width+col], 3); #endif } #ifdef LIBRAW_LIBRARY_BUILD free(buf); #endif } void CLASS packed_load_raw() { int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i; UINT64 bitbuf=0; bwide = raw_width * tiff_bps / 8; bwide += bwide & load_flags >> 7; rbits = bwide * 8 - raw_width * tiff_bps; if (load_flags & 1) bwide = bwide * 16 / 15; bite = 8 + (load_flags & 24); half = (raw_height+1) >> 1; for (irow=0; irow < raw_height; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif row = irow; if (load_flags & 2 && (row = irow % half * 2 + irow / half) == 1 && load_flags & 4) { if (vbits=0, tiff_compress) fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET); else { fseek (ifp, 0, SEEK_END); fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET); } } for (col=0; col < raw_width; col++) { for (vbits -= tiff_bps; vbits < 0; vbits += bite) { bitbuf <<= bite; for (i=0; i < bite; i+=8) bitbuf |= (unsigned) (fgetc(ifp) << i); } val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps); RAW(row,col ^ (load_flags >> 6 & 1)) = val; if
// (cont.) packed_load_raw() tail, then nokia_load_raw() (5-bytes-per-4-pixels unpack with
// optional byte reversal and an OmniVision CFA-orientation heuristic on row sums) and the
// two Android unpackers (tight: 5/4 packing; loose: 8 bytes -> six 10-bit values).
// canon_rmf_load_raw() begins at the end and continues into the next block.
(load_flags & 1 && (col % 10) == 9 && fgetc(ifp) && row < height+top_margin && col < width+left_margin) derror(); } vbits -= rbits; } } void CLASS nokia_load_raw() { uchar *data, *dp; int rev, dwide, row, col, c; double sum[]={0,0}; rev = 3 * (order == 0x4949); dwide = (raw_width * 5 + 1) / 4; data = (uchar *) malloc (dwide*2); merror (data, "nokia_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (data+dwide, 1, dwide, ifp) < dwide) derror(); FORC(dwide) data[c] = data[dwide+(c ^ rev)]; for (dp=data, col=0; col < raw_width; dp+=5, col+=4) FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3); } #ifdef LIBRAW_LIBRARY_BUILD } catch (...){ free (data); throw; } #endif free (data); maximum = 0x3ff; if (strcmp(make,"OmniVision")) return; row = raw_height/2; FORC(width-1) { sum[ c & 1] += SQR(RAW(row,c)-RAW(row+1,c+1)); sum[~c & 1] += SQR(RAW(row+1,c)-RAW(row,c+1)); } if (sum[1] > sum[0]) filters = 0x4b4b4b4b; } void CLASS android_tight_load_raw() { uchar *data, *dp; int bwide, row, col, c; bwide = -(-5*raw_width >> 5) << 3; data = (uchar *) malloc (bwide); merror (data, "android_tight_load_raw()"); for (row=0; row < raw_height; row++) { if (fread (data, 1, bwide, ifp) < bwide) derror(); for (dp=data, col=0; col < raw_width; dp+=5, col+=4) FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3); } free (data); } void CLASS android_loose_load_raw() { uchar *data, *dp; int bwide, row, col, c; UINT64 bitbuf=0; bwide = (raw_width+5)/6 << 3; data = (uchar *) malloc (bwide); merror (data, "android_loose_load_raw()"); for (row=0; row < raw_height; row++) { if (fread (data, 1, bwide, ifp) < bwide) derror(); for (dp=data, col=0; col < raw_width; dp+=8, col+=6) { FORC(8) bitbuf = (bitbuf << 8) | dp[c^7]; FORC(6) RAW(row,col+c) = (bitbuf >> c*10) & 0x3ff; } } free (data); } void CLASS canon_rmf_load_raw() { int row, col, bits, orow, ocol, c; #ifdef
LIBRAW_LIBRARY_BUILD int *words = (int*)malloc(sizeof(int)*(raw_width/3+1)); merror(words,"canon_rmf_load_raw"); #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); fread(words,sizeof(int),raw_width/3,ifp); for (col=0; col < raw_width-2; col+=3) { bits = words[col/3]; FORC3 { orow = row; if ((ocol = col+c-4) < 0) { ocol += raw_width; if ((orow -= 2) < 0) orow += raw_height; } RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff]; } } #else for (col=0; col < raw_width-2; col+=3) { bits = get4(); FORC3 { orow = row; if ((ocol = col+c-4) < 0) { ocol += raw_width; if ((orow -= 2) < 0) orow += raw_height; } RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff]; } } #endif } #ifdef LIBRAW_LIBRARY_BUILD free(words); #endif maximum = curve[0x3ff]; } unsigned CLASS pana_bits (int nbits) { #ifndef LIBRAW_NOTHREADS #define buf tls->pana_bits.buf #define vbits tls->pana_bits.vbits #else static uchar buf[0x4000]; static int vbits; #endif int byte; if (!nbits) return vbits=0; if (!vbits) { fread (buf+load_flags, 1, 0x4000-load_flags, ifp); fread (buf, 1, load_flags, ifp); } vbits = (vbits - nbits) & 0x1ffff; byte = vbits >> 3 ^ 0x3ff0; return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits); #ifndef LIBRAW_NOTHREADS #undef buf #undef vbits #endif } void CLASS panasonic_load_raw() { int row, col, i, j, sh=0, pred[2], nonz[2]; pana_bits(0); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { if ((i = col % 14) == 0) pred[0] = pred[1] = nonz[0] = nonz[1] = 0; if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2)); if (nonz[i & 1]) { if ((j = pana_bits(8))) { if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4) pred[i & 1] &= ~((~0u) << sh); pred[i & 1] += j << sh; } } else if ((nonz[i & 1] = pana_bits(8)) || i > 11) pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4); if ((RAW(row,col) = pred[col & 1]) > 4098 && col < width) derror(); } } } void CLASS olympus_load_raw() { ushort
// Above: canon_rmf_load_raw() (three 10-bit values per 32-bit word, wrap-around row/col
// remap through curve[]), pana_bits() (0x4000-byte rotating window with load_flags split —
// thread-local state via tls macros) and panasonic_load_raw() (14-column blocks, two
// interleaved predictors with shift codes). Below: olympus_load_raw() — 12-bit decode with
// a carry[] model and a W/N/NW gradient predictor — then minolta_rd175_load_raw()'s
// 768-byte group unscrambling (continues into the final line).
huff[4096]; int row, col, nbits, sign, low, high, i, c, w, n, nw; int acarry[2][3], *carry, pred, diff; huff[n=0] = 0xc0c; for (i=12; i--; ) FORC(2048 >> i) huff[++n] = (i+1) << 8 | i; fseek (ifp, 7, SEEK_CUR); getbits(-1); for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif memset (acarry, 0, sizeof acarry); for (col=0; col < raw_width; col++) { carry = acarry[col & 1]; i = 2 * (carry[2] < 3); for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++); low = (sign = getbits(3)) & 3; sign = sign << 29 >> 31; if ((high = getbithuff(12,huff)) == 12) high = getbits(16-nbits) >> 1; carry[0] = (high << nbits) | getbits(nbits); diff = (carry[0] ^ sign) + carry[1]; carry[1] = (diff*3 + carry[1]) >> 5; carry[2] = carry[0] > 16 ? 0 : carry[2]+1; if (col >= width) continue; if (row < 2 && col < 2) pred = 0; else if (row < 2) pred = RAW(row,col-2); else if (col < 2) pred = RAW(row-2,col); else { w = RAW(row,col-2); n = RAW(row-2,col); nw = RAW(row-2,col-2); if ((w < nw && nw < n) || (n < nw && nw < w)) { if (ABS(w-nw) > 32 || ABS(n-nw) > 32) pred = w + n - nw; else pred = (w + n) >> 1; } else pred = ABS(w-nw) > ABS(n-nw) ? w : n; } if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror(); } } } void CLASS minolta_rd175_load_raw() { uchar pixel[768]; unsigned irow, box, row, col; for (irow=0; irow < 1481; irow++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 768, ifp) < 768) derror(); box = irow / 82; row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2); switch (irow) { case 1477: case 1479: continue; case 1476: row = 984; break; case 1480: row = 985; break; case 1478: row = 985; box = 1; } if ((box < 12) && (box & 1)) { for (col=0; col < 1533; col++, row ^= 1) if (col != 1) RAW(row,col) = (col+1) & 2 ?
// (cont.) minolta_rd175_load_raw() interpolates odd-box rows from half-resolution data;
// quicktake_100_load_raw() starts below but its body extends past this chunk.
pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1; RAW(row,1) = pixel[1] << 1; RAW(row,1533) = pixel[765] << 1; } else for (col=row & 1; col < 1534; col+=2) RAW(row,col) = pixel[col/2] << 1; } maximum = 0xff << 1; } void CLASS quicktake_100_load_raw() { uchar pixel[484][644]; static const short gstep[16] = { -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 }; static const short rstep[6][4] = { { -3,-1,1,3 }, { -5,-1,1,5 }, { -8,-2,2,8 }, { -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } }; static const short t_curve[256] = { 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27, 28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53, 54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78, 79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116, 118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155, 158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195, 197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244, 248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322, 326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400, 405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479, 483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643, 654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844, 855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 }; int rb, row, col, sharp, val=0; getbits(-1); memset (pixel, 0x80, sizeof pixel); for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=2+(row & 1); col < width+2; col+=2) { val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] + pixel[row][col-2]) >> 2) + gstep[getbits(4)]; pixel[row][col] = val = LIM(val,0,255); if (col < 4) pixel[row][col-2] = pixel[row+1][~row & 1] = val; if (row == 2) pixel[row-1][col+1] = pixel[row-1][col+3] = val; } pixel[row][col] = val; } for (rb=0;
rb < 2; rb++) for (row=2+rb; row < height+2; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { if (row < 4 || col < 4) sharp = 2; else { val = ABS(pixel[row-2][col] - pixel[row][col-2]) + ABS(pixel[row-2][col] - pixel[row-2][col-2]) + ABS(pixel[row][col-2] - pixel[row-2][col-2]); sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 : val < 32 ? 3 : val < 48 ? 4 : 5; } val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1) + rstep[sharp][getbits(2)]; pixel[row][col] = val = LIM(val,0,255); if (row < 4) pixel[row-2][col+2] = val; if (col < 4) pixel[row+2][col-2] = val; } } for (row=2; row < height+2; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=3-(row & 1); col < width+2; col+=2) { val = ((pixel[row][col-1] + (pixel[row][col] << 2) + pixel[row][col+1]) >> 1) - 0x100; pixel[row][col] = LIM(val,0,255); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = t_curve[pixel[row+2][col+2]]; } maximum = 0x3ff; } #define radc_token(tree) ((signed char) getbithuff(8,huff[tree])) #define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--) #define PREDICTOR (c ? 
(buf[c][y-1][x] + buf[c][y][x+1]) / 2 \ : (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4) #ifdef __GNUC__ # if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) # pragma GCC optimize("no-aggressive-loop-optimizations") # endif #endif void CLASS kodak_radc_load_raw() { static const char src[] = { 1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8, 1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8, 2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8, 2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8, 2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8, 2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8, 2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8, 2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8, 2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4, 2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8, 1,0, 2,2, 2,-2, 1,-3, 1,3, 2,-17, 2,-5, 2,5, 2,17, 2,-7, 2,2, 2,9, 2,18, 2,-18, 2,-9, 2,-2, 2,7, 2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79, 2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76, 2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37 }; ushort huff[19][256]; int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val; short last[3] = { 16,16,16 }, mul[3], buf[3][3][386]; static const ushort pt[] = { 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 }; for (i=2; i < 12; i+=2) for (c=pt[i-2]; c <= pt[i]; c++) curve[c] = (float) (c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5; for (s=i=0; i < sizeof src; i+=2) FORC(256 >> src[i]) huff[0][s++] = src[i] << 8 | (uchar) src[i+1]; s = kodak_cbpp == 243 ? 2 : 3; FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1); getbits(-1); for (i=0; i < sizeof(buf)/sizeof(short); i++) buf[0][0][i] = 2048; for (row=0; row < height; row+=4) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif FORC3 mul[c] = getbits(6); FORC3 { val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c]; s = val > 65564 ? 
10:12; x = ~((~0u) << (s-1)); val <<= 12-s; for (i=0; i < sizeof(buf[0])/sizeof(short); i++) buf[c][0][i] = (buf[c][0][i] * val + x) >> s; last[c] = mul[c]; for (r=0; r <= !c; r++) { buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7; for (tree=1, col=width/2; col > 0; ) { if ((tree = radc_token(tree))) { col -= 2; if (tree == 8) FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c]; else FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR; } else do { nreps = (col > 2) ? radc_token(9) + 1 : 1; for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) { col -= 2; FORYX buf[c][y][x] = PREDICTOR; if (rep & 1) { step = radc_token(10) << 4; FORYX buf[c][y][x] += step; } } } while (nreps == 9); } for (y=0; y < 2; y++) for (x=0; x < width/2; x++) { val = (buf[c][y+1][x] << 4) / mul[c]; if (val < 0) val = 0; if (c) RAW(row+y*2+c-1,x*2+2-c) = val; else RAW(row+r*2+y,x*2+y) = val; } memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c); } } for (y=row; y < row+4; y++) for (x=0; x < width; x++) if ((x+y) & 1) { r = x ? x-1 : x+1; s = x+1 < width ? 
x+1 : x-1; val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2; if (val < 0) val = 0; RAW(y,x) = val; } } for (i=0; i < height*width; i++) raw_image[i] = curve[raw_image[i]]; maximum = 0x3fff; } #undef FORYX #undef PREDICTOR #ifdef NO_JPEG void CLASS kodak_jpeg_load_raw() {} void CLASS lossy_dng_load_raw() {} #else #ifndef LIBRAW_LIBRARY_BUILD METHODDEF(boolean) fill_input_buffer (j_decompress_ptr cinfo) { static uchar jpeg_buffer[4096]; size_t nbytes; nbytes = fread (jpeg_buffer, 1, 4096, ifp); swab (jpeg_buffer, jpeg_buffer, nbytes); cinfo->src->next_input_byte = jpeg_buffer; cinfo->src->bytes_in_buffer = nbytes; return TRUE; } void CLASS kodak_jpeg_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; int row, col; cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); jpeg_stdio_src (&cinfo, ifp); cinfo.src->fill_input_buffer = fill_input_buffer; jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); if ((cinfo.output_width != width ) || (cinfo.output_height*2 != height ) || (cinfo.output_components != 3 )) { fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname); jpeg_destroy_decompress (&cinfo); longjmp (failure, 3); } buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1); while (cinfo.output_scanline < cinfo.output_height) { row = cinfo.output_scanline * 2; jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < width; col+=2) { RAW(row+0,col+0) = pixel[col+0][1] << 1; RAW(row+1,col+1) = pixel[col+1][1] << 1; RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0]; RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2]; } } jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); maximum = 0xff << 1; } #else struct jpegErrorManager { struct jpeg_error_mgr pub; }; static void jpegErrorExit (j_common_ptr cinfo) { jpegErrorManager* myerr = (jpegErrorManager*) cinfo->err; throw LIBRAW_EXCEPTION_DECODE_JPEG; } // 
LibRaw's Kodak_jpeg_load_raw void CLASS kodak_jpeg_load_raw() { if(data_size < 1) throw LIBRAW_EXCEPTION_DECODE_JPEG; int row, col; jpegErrorManager jerr; struct jpeg_decompress_struct cinfo; cinfo.err = jpeg_std_error(&jerr.pub); jerr.pub.error_exit = jpegErrorExit; unsigned char *jpg_buf = (unsigned char *)malloc(data_size); merror(jpg_buf,"kodak_jpeg_load_raw"); unsigned char *pixel_buf = (unsigned char*) malloc(width*3); jpeg_create_decompress (&cinfo); merror(pixel_buf,"kodak_jpeg_load_raw"); fread(jpg_buf,data_size,1,ifp); swab ((char*)jpg_buf, (char*)jpg_buf, data_size); try { jpeg_mem_src(&cinfo, jpg_buf, data_size); int rc = jpeg_read_header(&cinfo, TRUE); if(rc!=1) throw LIBRAW_EXCEPTION_DECODE_JPEG; jpeg_start_decompress (&cinfo); if ((cinfo.output_width != width ) || (cinfo.output_height*2 != height ) || (cinfo.output_components != 3 )) { throw LIBRAW_EXCEPTION_DECODE_JPEG; } unsigned char *buf[1]; buf[0] = pixel_buf; while (cinfo.output_scanline < cinfo.output_height) { checkCancel(); row = cinfo.output_scanline * 2; jpeg_read_scanlines (&cinfo, buf, 1); unsigned char (*pixel)[3] = (unsigned char (*)[3]) buf[0]; for (col=0; col < width; col+=2) { RAW(row+0,col+0) = pixel[col+0][1] << 1; RAW(row+1,col+1) = pixel[col+1][1] << 1; RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0]; RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2]; } } } catch (...) 
{ jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); free(jpg_buf); free(pixel_buf); throw; } jpeg_finish_decompress (&cinfo); jpeg_destroy_decompress (&cinfo); free(jpg_buf); free(pixel_buf); maximum = 0xff << 1; } #endif #ifndef LIBRAW_LIBRARY_BUILD void CLASS gamma_curve (double pwr, double ts, int mode, int imax); #endif void CLASS lossy_dng_load_raw() { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buf; JSAMPLE (*pixel)[3]; unsigned sorder=order, ntags, opcode, deg, i, j, c; unsigned save=data_offset-4, trow=0, tcol=0, row, col; ushort cur[3][256]; double coeff[9], tot; if (meta_offset) { fseek (ifp, meta_offset, SEEK_SET); order = 0x4d4d; ntags = get4(); while (ntags--) { opcode = get4(); get4(); get4(); if (opcode != 8) { fseek (ifp, get4(), SEEK_CUR); continue; } fseek (ifp, 20, SEEK_CUR); if ((c = get4()) > 2) break; fseek (ifp, 12, SEEK_CUR); if ((deg = get4()) > 8) break; for (i=0; i <= deg && i < 9; i++) coeff[i] = getreal(12); for (i=0; i < 256; i++) { for (tot=j=0; j <= deg; j++) tot += coeff[j] * pow(i/255.0, (int)j); cur[c][i] = tot*0xffff; } } order = sorder; } else { gamma_curve (1/2.4, 12.92, 1, 255); FORC3 memcpy (cur[c], curve, sizeof cur[0]); } cinfo.err = jpeg_std_error (&jerr); jpeg_create_decompress (&cinfo); while (trow < raw_height) { fseek (ifp, save+=4, SEEK_SET); if (tile_length < INT_MAX) fseek (ifp, get4(), SEEK_SET); #ifdef LIBRAW_LIBRARY_BUILD if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1) { jpeg_destroy_decompress(&cinfo); throw LIBRAW_EXCEPTION_DECODE_JPEG; } #else jpeg_stdio_src (&cinfo, ifp); #endif jpeg_read_header (&cinfo, TRUE); jpeg_start_decompress (&cinfo); buf = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1); #ifdef LIBRAW_LIBRARY_BUILD try { #endif while (cinfo.output_scanline < cinfo.output_height && (row = trow + cinfo.output_scanline) < height) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif 
jpeg_read_scanlines (&cinfo, buf, 1); pixel = (JSAMPLE (*)[3]) buf[0]; for (col=0; col < cinfo.output_width && tcol+col < width; col++) { FORC3 image[row*width+tcol+col][c] = cur[c][pixel[col][c]]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { jpeg_destroy_decompress (&cinfo); throw; } #endif jpeg_abort_decompress (&cinfo); if ((tcol += tile_width) >= raw_width) trow += tile_length + (tcol = 0); } jpeg_destroy_decompress (&cinfo); maximum = 0xffff; } #endif void CLASS kodak_dc120_load_raw() { static const int mul[4] = { 162, 192, 187, 92 }; static const int add[4] = { 0, 636, 424, 212 }; uchar pixel[848]; int row, shift, col; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, 848, ifp) < 848) derror(); shift = row * mul[row & 3] + add[row & 3]; for (col=0; col < width; col++) RAW(row,col) = (ushort) pixel[(col + shift) % 848]; } maximum = 0xff; } void CLASS eight_bit_load_raw() { uchar *pixel; unsigned row, col; pixel = (uchar *) calloc (raw_width, sizeof *pixel); merror (pixel, "eight_bit_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, 1, raw_width, ifp) < raw_width) derror(); for (col=0; col < raw_width; col++) RAW(row,col) = curve[pixel[col]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
{ free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_c330_load_raw() { uchar *pixel; int row, col, y, cb, cr, rgb[3], c; pixel = (uchar *) calloc (raw_width, 2*sizeof *pixel); merror (pixel, "kodak_c330_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (fread (pixel, raw_width, 2, ifp) < 2) derror(); if (load_flags && (row & 31) == 31) fseek (ifp, raw_width*32, SEEK_CUR); for (col=0; col < width; col++) { y = pixel[col*2]; cb = pixel[(col*2 & -4) | 1] - 128; cr = pixel[(col*2 & -4) | 3] - 128; rgb[1] = y - ((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_c603_load_raw() { uchar *pixel; int row, col, y, cb, cr, rgb[3], c; pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel); merror (pixel, "kodak_c603_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (~row & 1) if (fread (pixel, raw_width, 3, ifp) < 3) derror(); for (col=0; col < width; col++) { y = pixel[width*2*(row & 1) + col]; cb = pixel[width + (col & -2)] - 128; cr = pixel[width + (col & -2)+1] - 128; rgb[1] = y - ((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)]; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
{ free (pixel); throw; } #endif free (pixel); maximum = curve[0xff]; } void CLASS kodak_262_load_raw() { static const uchar kodak_tree[2][26] = { { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 }, { 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } }; ushort *huff[2]; uchar *pixel; int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val; FORC(2) huff[c] = make_decoder (kodak_tree[c]); ns = (raw_height+63) >> 5; pixel = (uchar *) malloc (raw_width*32 + ns*4); merror (pixel, "kodak_262_load_raw()"); strip = (int *) (pixel + raw_width*32); order = 0x4d4d; FORC(ns) strip[c] = get4(); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if ((row & 31) == 0) { fseek (ifp, strip[row >> 5], SEEK_SET); getbits(-1); pi = 0; } for (col=0; col < raw_width; col++) { chess = (row + col) & 1; pi1 = chess ? pi-2 : pi-raw_width-1; pi2 = chess ? pi-2*raw_width : pi-raw_width+1; if (col <= chess) pi1 = -1; if (pi1 < 0) pi1 = pi2; if (pi2 < 0) pi2 = pi1; if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2; pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1; pixel[pi] = val = pred + ljpeg_diff (huff[chess]); if (val >> 8) derror(); val = curve[pixel[pi++]]; RAW(row,col) = val; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) 
{
  /* tail of kodak_262_load_raw(): free the strip buffer on the
     cancellation path, then re-raise */
  free (pixel);
  throw;
}
#endif
  free (pixel);
  FORC(2) free (huff[c]);           /* release both Huffman decode tables */
}

/*
 * Decode one strip of Kodak "65000"-style compressed data into out[].
 * Returns 1 when the strip turned out to be stored uncompressed
 * (values in out[] are absolute), 0 when it was difference-coded
 * (values in out[] are deltas the caller must accumulate).
 */
int CLASS kodak_65000_decode (short *out, int bsize)
{
  uchar c, blen[768];
  ushort raw[6];
  INT64 bitbuf=0;
  int save, bits=0, i, j, len, diff;

  save = ftell(ifp);                /* remember strip start for the rewind below */
  bsize = (bsize + 3) & -4;         /* round the request up to a multiple of 4 */
  for (i=0; i < bsize; i+=2) {
    c = fgetc(ifp);                 /* two 4-bit code lengths packed per byte */
    if ((blen[i ] = c & 15) > 12 ||
        (blen[i+1] = c >> 4) > 12 ) {
      /* a length > 12 is impossible for coded data, so this strip is
         uncompressed: rewind and unpack groups of six 16-bit words
         (the four high bits of each word feed two extra samples) */
      fseek (ifp, save, SEEK_SET);
      for (i=0; i < bsize; i+=8) {
        read_shorts (raw, 6);
        out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
        out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
        for (j=0; j < 6; j++)
          out[i+2+j] = raw[j] & 0xfff;
      }
      return 1;
    }
  }
  if ((bsize & 7) == 4) {
    /* odd half-group: pre-load 16 bits so later refills stay 32-bit aligned */
    bitbuf  = fgetc(ifp) << 8;
    bitbuf += fgetc(ifp);
    bits = 16;
  }
  for (i=0; i < bsize; i++) {
    len = blen[i];
    if (bits < len) {
      /* refill 32 bits; (j^8) swaps the two bytes of each 16-bit half */
      for (j=0; j < 32; j+=8)
        bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
      bits += 32;
    }
    diff = bitbuf & (0xffff >> (16-len));
    bitbuf >>= len;
    bits -= len;
    /* JPEG-style sign extension of the len-bit difference code */
    if ((diff & (1 << (len-1))) == 0)
      diff -= (1 << len) - 1;
    out[i] = diff;
  }
  return 0;
}

/* Load a Kodak 65000-compressed raw image, decoding 256-pixel strips
   per row with two interleaved predictors.
   (Body continues on the next source line.) */
void CLASS kodak_65000_load_raw()
{
  short buf[256];
  int row, col, len, pred[2], ret, i;

  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < width; col+=256) {
      pred[0] = pred[1] = 0;
      len = MIN (256, width-col);
      ret = kodak_65000_decode (buf, len);
      for (i=0; i < len; i++)
        if ((RAW(row,col+i) = curve[ret ?
buf[i] : (pred[i & 1] += buf[i])]) >> 12) derror(); } } } void CLASS kodak_ycbcr_load_raw() { short buf[384], *bp; int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3]; ushort *ip; if (!image) return; unsigned int bits = (load_flags && load_flags > 9 && load_flags < 17)?load_flags:10; for (row=0; row < height; row+=2) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=128) { len = MIN (128, width-col); kodak_65000_decode (buf, len*3); y[0][1] = y[1][1] = cb = cr = 0; for (bp=buf, i=0; i < len; i+=2, bp+=2) { cb += bp[4]; cr += bp[5]; rgb[1] = -((cb + cr + 2) >> 2); rgb[2] = rgb[1] + cb; rgb[0] = rgb[1] + cr; for (j=0; j < 2; j++) for (k=0; k < 2; k++) { if ((y[j][k] = y[j][k^1] + *bp++) >> bits) derror(); ip = image[(row+j)*width + col+i+k]; FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)]; } } } } } void CLASS kodak_rgb_load_raw() { short buf[768], *bp; int row, col, len, c, i, rgb[3],ret; ushort *ip=image[0]; for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col+=256) { len = MIN (256, width-col); ret = kodak_65000_decode (buf, len*3); memset (rgb, 0, sizeof rgb); for (bp=buf, i=0; i < len; i++, ip+=4) #ifdef LIBRAW_LIBRARY_BUILD if(load_flags == 12) { FORC3 ip[c] = ret ? (*bp++) : (rgb[c] += *bp++); } else #endif FORC3 if ((ip[c] = ret ? 
(*bp++) : (rgb[c] += *bp++)) >> 12) derror(); } } } void CLASS kodak_thumb_load_raw() { int row, col; colors = thumb_misc >> 5; for (row=0; row < height; row++) for (col=0; col < width; col++) read_shorts (image[row*width+col], colors); maximum = (1 << (thumb_misc & 31)) - 1; } void CLASS sony_decrypt (unsigned *data, int len, int start, int key) { #ifndef LIBRAW_NOTHREADS #define pad tls->sony_decrypt.pad #define p tls->sony_decrypt.p #else static unsigned pad[128], p; #endif if (start) { for (p=0; p < 4; p++) pad[p] = key = key * 48828125 + 1; pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31; for (p=4; p < 127; p++) pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31; for (p=0; p < 127; p++) pad[p] = htonl(pad[p]); } while (len--) { *data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127]; p++; } #ifndef LIBRAW_NOTHREADS #undef pad #undef p #endif } void CLASS sony_load_raw() { uchar head[40]; ushort *pixel; unsigned i, key, row, col; fseek (ifp, 200896, SEEK_SET); fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR); order = 0x4d4d; key = get4(); fseek (ifp, 164600, SEEK_SET); fread (head, 1, 40, ifp); sony_decrypt ((unsigned int *) head, 10, 1, key); for (i=26; i-- > 22; ) key = key << 8 | head[i]; fseek (ifp, data_offset, SEEK_SET); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pixel = raw_image + row*raw_width; if (fread (pixel, 2, raw_width, ifp) < raw_width) derror(); sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key); for (col=0; col < raw_width; col++) if ((pixel[col] = ntohs(pixel[col])) >> 14) derror(); } maximum = 0x3ff0; } void CLASS sony_arw_load_raw() { ushort huff[32770]; static const ushort tab[18] = { 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809, 0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 }; int i, c, n, col, row, sum=0; huff[0] = 15; for (n=i=0; i < 18; i++) FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i]; getbits(-1); for (col = raw_width; col--; ) { #ifdef 
LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (row=0; row < raw_height+1; row+=2) { if (row == raw_height) row = 1; if ((sum += ljpeg_diff(huff)) >> 12) derror(); if (row < height) RAW(row,col) = sum; } } } void CLASS sony_arw2_load_raw() { uchar *data, *dp; ushort pix[16]; int row, col, val, max, min, imax, imin, sh, bit, i; data = (uchar *) malloc (raw_width+1); merror (data, "sony_arw2_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD try { #endif for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fread (data, 1, raw_width, ifp); for (dp=data, col=0; col < raw_width-30; dp+=16) { max = 0x7ff & (val = sget4(dp)); min = 0x7ff & val >> 11; imax = 0x0f & val >> 22; imin = 0x0f & val >> 26; for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++); #ifdef LIBRAW_LIBRARY_BUILD /* flag checks if outside of loop */ if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_NONE || imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE ) { for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = max; else if (i == imin) pix[i] = min; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min; if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } } else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_BASEONLY) { for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = max; else if (i == imin) pix[i] = min; else pix[i]=0; } else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTAONLY) { for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = 0; else if (i == imin) pix[i] = 0; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min; if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } } else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTAZEROBASE) { for (bit=30, i=0; i < 16; i++) if (i == imax) pix[i] = 0; else if (i == imin) pix[i] = 0; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh); if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } } #else /* unaltered dcraw processing */ for (bit=30, 
i=0; i < 16; i++) if (i == imax) pix[i] = max; else if (i == imin) pix[i] = min; else { pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min; if (pix[i] > 0x7ff) pix[i] = 0x7ff; bit += 7; } #endif #ifdef LIBRAW_LIBRARY_BUILD if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE) { for (i=0; i < 16; i++, col+=2) { unsigned slope = pix[i] < 1001? 2 : curve[pix[i]<<1]-curve[(pix[i]<<1)-2]; unsigned step = 1 << sh; RAW(row,col)=curve[pix[i]<<1]>black+imgdata.params.sony_arw2_posterization_thr? LIM(((slope*step*1000)/(curve[pix[i]<<1]-black)),0,10000):0; } } else { for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1]; } #else for (i=0; i < 16; i++, col+=2) RAW(row,col) = curve[pix[i] << 1] >> 2; #endif col -= col & 1 ? 1:31; } } #ifdef LIBRAW_LIBRARY_BUILD } catch(...) { free (data); throw; } if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE) maximum=10000; #endif free (data); } void CLASS samsung_load_raw() { int row, col, c, i, dir, op[4], len[4]; order = 0x4949; for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, strip_offset+row*4, SEEK_SET); fseek (ifp, data_offset+get4(), SEEK_SET); ph1_bits(-1); FORC4 len[c] = row < 2 ? 7:4; for (col=0; col < raw_width; col+=16) { dir = ph1_bits(1); FORC4 op[c] = ph1_bits(2); FORC4 switch (op[c]) { case 3: len[c] = ph1_bits(4); break; case 2: len[c]--; break; case 1: len[c]++; } for (c=0; c < 16; c+=2) { i = len[((c & 1) << 1) | (c >> 3)]; RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) + (dir ? RAW(row+(~c | -2),col+c) : col ? 
RAW(row,col+(c | -2)) : 128); if (c == 14) c = -1; } } } for (row=0; row < raw_height-1; row+=2) for (col=0; col < raw_width-1; col+=2) SWAP (RAW(row,col+1), RAW(row+1,col)); } void CLASS samsung2_load_raw() { static const ushort tab[14] = { 0x304,0x307,0x206,0x205,0x403,0x600,0x709, 0x80a,0x90b,0xa0c,0xa0d,0x501,0x408,0x402 }; ushort huff[1026], vpred[2][2] = {{0,0},{0,0}}, hpred[2]; int i, c, n, row, col, diff; huff[0] = 10; for (n=i=0; i < 14; i++) FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i]; getbits(-1); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < raw_width; col++) { diff = ljpeg_diff (huff); if (col < 2) hpred[col] = vpred[row & 1][col] += diff; else hpred[col & 1] += diff; RAW(row,col) = hpred[col & 1]; if (hpred[col & 1] >> tiff_bps) derror(); } } } void CLASS samsung3_load_raw() { int opt, init, mag, pmode, row, tab, col, pred, diff, i, c; ushort lent[3][2], len[4], *prow[2]; order = 0x4949; fseek (ifp, 9, SEEK_CUR); opt = fgetc(ifp); init = (get2(),get2()); for (row=0; row < raw_height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif fseek (ifp, (data_offset-ftell(ifp)) & 15, SEEK_CUR); ph1_bits(-1); mag = 0; pmode = 7; FORC(6) lent[0][c] = row < 2 ? 7:4; prow[ row & 1] = &RAW(row-1,1-((row & 1) << 1)); // green prow[~row & 1] = &RAW(row-2,0); // red and blue for (tab=0; tab+15 < raw_width; tab+=16) { if (~opt & 4 && !(tab & 63)) { i = ph1_bits(2); mag = i < 3 ? mag-'2'+"204"[i] : ph1_bits(12); } if (opt & 2) pmode = 7 - 4*ph1_bits(1); else if (!ph1_bits(1)) pmode = ph1_bits(3); if (opt & 1 || !ph1_bits(1)) { FORC4 len[c] = ph1_bits(2); FORC4 { i = ((row & 1) << 1 | (c & 1)) % 3; len[c] = len[c] < 3 ? lent[i][0]-'1'+"120"[len[c]] : ph1_bits(4); lent[i][0] = lent[i][1]; lent[i][1] = len[c]; } } FORC(16) { col = tab + (((c & 7) << 1)^(c >> 3)^(row & 1)); pred = (pmode == 7 || row < 2) ? (tab ? 
RAW(row,tab-2+(col & 1)) : init) : (prow[col & 1][col-'4'+"0224468"[pmode]] + prow[col & 1][col-'4'+"0244668"[pmode]] + 1) >> 1; diff = ph1_bits (i = len[c >> 2]); if (diff >> (i-1)) diff -= 1 << i; diff = diff * (mag*2+1) + mag; RAW(row,col) = pred + diff; } } } } #define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1) /* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */ void CLASS smal_decode_segment (unsigned seg[2][2], int holes) { uchar hist[3][13] = { { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 }, { 3, 3, 0, 0, 63, 47, 31, 15, 0 } }; int low, high=0xff, carry=0, nbits=8; int pix, s, count, bin, next, i, sym[3]; uchar diff, pred[]={0,0}; ushort data=0, range=0; fseek (ifp, seg[0][1]+1, SEEK_SET); getbits(-1); for (pix=seg[0][0]; pix < seg[1][0]; pix++) { for (s=0; s < 3; s++) { data = data << nbits | getbits(nbits); if (carry < 0) carry = (nbits += carry+1) < 1 ? nbits-1 : 0; while (--nbits >= 0) if ((data >> nbits & 0xff) == 0xff) break; if (nbits > 0) data = ((data & ((1 << (nbits-1)) - 1)) << 1) | ((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits)); if (nbits >= 0) { data += getbits(1); carry = nbits - 8; } count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4); for (bin=0; hist[s][bin+5] > count; bin++); low = hist[s][bin+5] * (high >> 4) >> 2; if (bin) high = hist[s][bin+4] * (high >> 4) >> 2; high -= low; for (nbits=0; high << nbits < 128; nbits++); range = (range+low) << nbits; high <<= nbits; next = hist[s][1]; if (++hist[s][2] > hist[s][3]) { next = (next+1) & hist[s][0]; hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2; hist[s][2] = 1; } if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) { if (bin < hist[s][1]) for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--; else if (next <= bin) for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++; } hist[s][1] = next; sym[s] = bin; } diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3); if (sym[0] & 4) diff = diff ? 
-diff : 0x80;
    /* tail of smal_decode_segment(): force a zero delta near the end of
       the segment, accumulate into the alternating-column predictor, and
       skip ahead two pixels when the current row is a flagged "hole" row */
    if (ftell(ifp) + 12 >= seg[1][1]) diff = 0;
    raw_image[pix] = pred[pix & 1] += diff;
    if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
  }
  maximum = 0xff;
}

/* SMaL v6: the whole image is one compressed segment, with no hole rows. */
void CLASS smal_v6_load_raw()
{
  unsigned seg[2][2];

  fseek (ifp, 16, SEEK_SET);
  seg[0][0] = 0;
  seg[0][1] = get2();
  seg[1][0] = raw_width * raw_height;
  seg[1][1] = INT_MAX;
  smal_decode_segment (seg, 0);
}

/* Return the mean of the two middle values of p[0..3]
   (sum minus min and max, halved). */
int CLASS median4 (int *p)
{
  int min, max, sum, i;

  min = max = sum = p[0];
  for (i=1; i < 4; i++) {
    sum += p[i];
    if (min > p[i]) min = p[i];
    if (max < p[i]) max = p[i];
  }
  return (sum - min - max) >> 1;
}

/* Interpolate the pixels in rows that smal_decode_segment() skipped
   (rows flagged by the HOLE() bitmask). */
void CLASS fill_holes (int holes)
{
  int row, col, val[4];

  for (row=2; row < height-2; row++) {
    if (!HOLE(row)) continue;
    /* odd columns: median4 of the four diagonal neighbours */
    for (col=1; col < width-1; col+=4) {
      val[0] = RAW(row-1,col-1);
      val[1] = RAW(row-1,col+1);
      val[2] = RAW(row+1,col-1);
      val[3] = RAW(row+1,col+1);
      RAW(row,col) = median4(val);
    }
    /* even columns: horizontal average when the rows two above/below are
       themselves holes, otherwise median4 of the axial neighbours */
    for (col=2; col < width-2; col+=4)
      if (HOLE(row-2) || HOLE(row+2))
        RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
      else {
        val[0] = RAW(row,col-2);
        val[1] = RAW(row,col+2);
        val[2] = RAW(row-2,col);
        val[3] = RAW(row+2,col);
        RAW(row,col) = median4(val);
      }
  }
}

/* SMaL v9: multiple segments listed in an offset table, with possible
   hole rows that are filled in afterwards. */
void CLASS smal_v9_load_raw()
{
  unsigned seg[256][2], offset, nseg, holes, i;

  fseek (ifp, 67, SEEK_SET);
  offset = get4();
  nseg = fgetc(ifp);
  fseek (ifp, offset, SEEK_SET);
  /* seg[][] is filled flat: even entries are pixel indices, odd entries
     are file offsets (rebased by data_offset) */
  for (i=0; i < nseg*2; i++)
    seg[0][i] = get4() + data_offset*(i & 1);
  fseek (ifp, 78, SEEK_SET);
  holes = fgetc(ifp);
  fseek (ifp, 88, SEEK_SET);
  seg[nseg][0] = raw_height * raw_width;   /* sentinel end segment */
  seg[nseg][1] = get4() + data_offset;
  for (i=0; i < nseg; i++)
    smal_decode_segment (seg+i, holes);
  if (holes) fill_holes (holes);
}

/* RED "redcine" raw: the sensor data is a JPEG-2000 codestream decoded
   through the JasPer library.  (Body continues on the next source line.) */
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
  int c, row, col;
  jas_stream_t *in;
  jas_image_t *jimg;
  jas_matrix_t *jmat;
  jas_seqent_t *data;
  ushort *img, *pix;

  jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
  in = jas_stream_fopen (ifname, "rb");
#else
  in = (jas_stream_t*)ifp->make_jas_stream();
  if(!in) throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
  jas_stream_seek (in,
data_offset+20, SEEK_SET); jimg = jas_image_decode (in, -1, 0); #ifndef LIBRAW_LIBRARY_BUILD if (!jimg) longjmp (failure, 3); #else if(!jimg) { jas_stream_close (in); throw LIBRAW_EXCEPTION_DECODE_JPEG2000; } #endif jmat = jas_matrix_create (height/2, width/2); merror (jmat, "redcine_load_raw()"); img = (ushort *) calloc ((height+2), (width+2)*2); merror (img, "redcine_load_raw()"); #ifdef LIBRAW_LIBRARY_BUILD bool fastexitflag = false; try { #endif FORC4 { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat); data = jas_matrix_getref (jmat, 0, 0); for (row = c >> 1; row < height; row+=2) for (col = c & 1; col < width; col+=2) img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2]; } for (col=1; col <= width; col++) { img[col] = img[2*(width+2)+col]; img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col]; } for (row=0; row < height+2; row++) { img[row*(width+2)] = img[row*(width+2)+2]; img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3]; } for (row=1; row <= height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1)); for ( ; col <= width; col+=2, pix+=2) { c = (((pix[0] - 0x800) << 3) + pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2; pix[0] = LIM(c,0,4095); } } for (row=0; row < height; row++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif for (col=0; col < width; col++) RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) 
{ fastexitflag=true; } #endif free (img); jas_matrix_destroy (jmat); jas_image_destroy (jimg); jas_stream_close (in); #ifdef LIBRAW_LIBRARY_BUILD if(fastexitflag) throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK; #endif #endif } //@end COMMON /* RESTRICTED code starts here */ void CLASS foveon_decoder (unsigned size, unsigned code) { static unsigned huff[1024]; struct decode *cur; int i, len; if (!code) { for (i=0; i < size; i++) huff[i] = get4(); memset (first_decode, 0, sizeof first_decode); free_decode = first_decode; } cur = free_decode++; if (free_decode > first_decode+2048) { fprintf (stderr,_("%s: decoder table overflow\n"), ifname); longjmp (failure, 2); } if (code) for (i=0; i < size; i++) if (huff[i] == code) { cur->leaf = i; return; } if ((len = code >> 27) > 26) return; code = (len+1) << 27 | (code & 0x3ffffff) << 1; cur->branch[0] = free_decode; foveon_decoder (size, code); cur->branch[1] = free_decode; foveon_decoder (size, code+1); } void CLASS foveon_thumb() { unsigned bwide, row, col, bitbuf=0, bit=1, c, i; char *buf; struct decode *dindex; short pred[3]; bwide = get4(); fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height); if (bwide > 0) { if (bwide < thumb_width*3) return; buf = (char *) malloc (bwide); merror (buf, "foveon_thumb()"); for (row=0; row < thumb_height; row++) { fread (buf, 1, bwide, ifp); fwrite (buf, 3, thumb_width, ofp); } free (buf); return; } foveon_decoder (256, 0); for (row=0; row < thumb_height; row++) { memset (pred, 0, sizeof pred); if (!bit) get4(); for (bit=col=0; col < thumb_width; col++) FORC3 { for (dindex=first_decode; dindex->branch[0]; ) { if ((bit = (bit-1) & 31) == 31) for (i=0; i < 4; i++) bitbuf = (bitbuf << 8) + fgetc(ifp); dindex = dindex->branch[bitbuf >> bit & 1]; } pred[c] += dindex->leaf; fputc (pred[c], ofp); } } } void CLASS foveon_sd_load_raw() { struct decode *dindex; short diff[1024]; unsigned bitbuf=0; int pred[3], row, col, bit=-1, c, i; read_shorts ((ushort *) diff, 1024); if (!load_flags) 
    foveon_decoder (1024, 0);
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    memset (pred, 0, sizeof pred);
    /* NOTE(review): atoi(model+2) presumably parses the digits of an
       "SDnn" model name; older bodies need a 4-byte row header skipped.
       Confirm against the code that sets 'model'. */
    if (!bit && !load_flags && atoi(model+2) < 14) get4();
    for (col=bit=0; col < width; col++) {
      if (load_flags) {
	/* packed variant: three 10-bit table indices per 32-bit word */
	bitbuf = get4();
	FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff];
      } else FORC3 {
	/* Huffman variant: walk the decode tree bit by bit */
	for (dindex=first_decode; dindex->branch[0]; ) {
	  if ((bit = (bit-1) & 31) == 31)
	    for (i=0; i < 4; i++)
	      bitbuf = (bitbuf << 8) + fgetc(ifp);
	  dindex = dindex->branch[bitbuf >> bit & 1];
	}
	pred[c] += diff[dindex->leaf];
	/* predictor escaped 16 bits in either direction: corrupt data */
	if (pred[c] >> 16 && ~pred[c] >> 16) derror();
      }
      FORC3 image[row*width+col][c] = pred[c];
    }
  }
}

/*
   Read a 13-symbol Huffman table from the file and expand it into the
   8-bit lookup form used by ljpeg_diff(): huff[0] holds the lookup width
   (8 bits) and each entry packs (code length << 8 | symbol).
   NOTE(review): 'clen' and 'code' come straight from the file and are not
   range-checked before indexing huff[] -- verify bounds are enforced
   upstream before decoding untrusted files.
 */
void CLASS foveon_huff (ushort *huff)
{
  int i, j, clen, code;

  huff[0] = 8;
  for (i=0; i < 13; i++) {
    clen = getc(ifp);
    code = getc(ifp);
    for (j=0; j < 256 >> clen; )
      huff[code+ ++j] = clen << 8 | i;
  }
  get2();
}

/*
   Decode raw data for Foveon "DP" style files: three color planes stored
   at 16-byte-aligned offsets, each coded with ljpeg_diff() differences
   against the usual two-column vertical/horizontal predictors (seeded
   with 512).
 */
void CLASS foveon_dp_load_raw()
{
  unsigned c, roff[4], row, col, diff;
  ushort huff[512], vpred[2][2], hpred[2];

  fseek (ifp, 8, SEEK_CUR);
  foveon_huff (huff);
  roff[0] = 48;
  /* round each plane offset up to a multiple of 16 */
  FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16);
  FORC3 {
    fseek (ifp, data_offset+roff[c], SEEK_SET);
    getbits(-1);
    vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512;
    for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col=0; col < width; col++) {
	diff = ljpeg_diff(huff);
	if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
	else hpred[col & 1] += diff;
	image[row*width+col][c] = hpred[col & 1];
      }
    }
  }
}

/*
   Load the CAMF (camera metadata) section into meta_data.
   Type 2 sections are stored XOR-scrambled with a multiplicative
   congruential sequence; type 4 sections are Huffman-coded 12-bit values
   repacked two-samples-to-three-bytes.
 */
void CLASS foveon_load_camf()
{
  unsigned type, wide, high, i, j, row, col, diff;
  ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2];

  fseek (ifp, meta_offset, SEEK_SET);
  type = get4();  get4();  get4();
  wide = get4();
  high = get4();
  if (type == 2) {
    fread (meta_data, 1, meta_length, ifp);
    /* descramble in place; 'high' doubles as the PRNG state */
    for (i=0; i < meta_length; i++) {
      high = (high * 1597 + 51749) % 244944;
      wide = high * (INT64) 301593171 >> 24;
      meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17;
    }
  } else if (type == 4) {
    free (meta_data);
    meta_data = (char *) malloc (meta_length = wide*high*3/2);
    merror (meta_data, "foveon_load_camf()");
    foveon_huff (huff);
    get4();
    getbits(-1);
    /* decode 12-bit predicted samples; each pair of samples is packed
       into three output bytes */
    for (j=row=0; row < high; row++) {
      for (col=0; col < wide; col++) {
	diff = ljpeg_diff(huff);
	if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
	else hpred[col & 1] += diff;
	if (col & 1) {
	  meta_data[j++] = hpred[0] >> 4;
	  meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8;
	  meta_data[j++] = hpred[1];
	}
      }
    }
  }
#ifdef DCRAW_VERBOSE
  else fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type);
#endif
}

/*
   Look up a named parameter inside a named CAMF "CMbP" block.
   Scans meta_data block by block (each block header stores its own length
   at offset 8) and returns a pointer into meta_data at the parameter's
   value, or NULL if not found.  The returned string is owned by
   meta_data -- callers must not free it.
 */
const char * CLASS foveon_camf_param (const char *block, const char *param)
{
  unsigned idx, num;
  char *pos, *cp, *dp;

  for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
    pos = meta_data + idx;
    if (strncmp (pos, "CMb", 3)) break;
    if (pos[3] != 'P') continue;
    if (strcmp (block, pos+sget4(pos+12))) continue;
    cp = pos + sget4(pos+16);
    num = sget4(cp);
    dp = pos + sget4(cp+4);
    while (num--) {
      cp += 8;
      if (!strcmp (param, dp+sget4(cp)))
	return dp+sget4(cp+4);
    }
  }
  return 0;
}

/*
   Extract a named matrix from a CAMF "CMbM" block.
   Fills dim[0..2] with the (up to three) dimensions and returns a freshly
   malloc'd array of 32-bit elements that the caller must free; element
   type 6 is stored as 16-bit values in the file.  Returns NULL if the
   matrix is missing, has more than three dimensions, or its claimed size
   is implausible for meta_length.
 */
void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name)
{
  unsigned i, idx, type, ndim, size, *mat;
  char *pos, *cp, *dp;
  double dsize;

  for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
    pos = meta_data + idx;
    if (strncmp (pos, "CMb", 3)) break;
    if (pos[3] != 'M') continue;
    if (strcmp (name, pos+sget4(pos+12))) continue;
    dim[0] = dim[1] = dim[2] = 1;
    cp = pos + sget4(pos+16);
    type = sget4(cp);
    if ((ndim = sget4(cp+4)) > 3) break;
    dp = pos + sget4(cp+8);
    for (i=ndim; i--; ) {
      cp += 12;
      dim[i] = sget4(cp);
    }
    /* sanity-check the total element count against the metadata size
       (done in double to avoid integer overflow on the product) */
    if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break;
    mat = (unsigned *) malloc ((size = dsize) * 4);
    merror (mat, "foveon_camf_matrix()");
    for (i=0; i < size; i++)
      if (type && type != 6)
	mat[i] = sget4(dp + i*4);
      else mat[i] = sget4(dp + i*2) & 0xffff;
    return mat;
  }
#ifdef DCRAW_VERBOSE
  fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name);
#endif
  return 0;
}

/* Copy the named CAMF matrix into 'ptr' ('size' 32-bit words);
   returns 1 on success, 0 if 'name' is NULL or the matrix is absent. */
int CLASS foveon_fixed (void *ptr, int size, const char
*name)
{
  void *dp;
  unsigned dim[3];

  if (!name) return 0;
  dp = foveon_camf_matrix (dim, name);
  if (!dp) return 0;
  memcpy (ptr, dp, size*4);
  free (dp);
  return 1;
}

/*
   Average a column of samples pix[range[0]..range[1]] (stride 4, i.e.
   one color plane of 'image'), applying the column-filter compensation
   'cfilt' against each sample's predecessor.  The minimum and maximum
   samples are excluded from the mean unless the range holds exactly
   two samples.
 */
float CLASS foveon_avg (short *pix, int range[2], float cfilt)
{
  int i;
  float val, min=FLT_MAX, max=-FLT_MAX, sum=0;

  for (i=range[0]; i <= range[1]; i++) {
    sum += val = pix[i*4] + (pix[i*4]-pix[(i-1)*4]) * cfilt;
    if (min > val) min = val;
    if (max < val) max = val;
  }
  if (range[1] - range[0] == 1) return sum/2;
  return (sum - min - max) / (range[1] - range[0] - 1);
}

/*
   Allocate and return a tone curve of the form
   (cos(x)+1)/2 * tanh(i*filt/mul) * mul, sized so the cosine spans a
   full period over the curve.  curve[0] stores the entry count; the
   caller owns (and must free) the array.
 */
short * CLASS foveon_make_curve (double max, double mul, double filt)
{
  short *curve;
  unsigned i, size;
  double x;

  if (!filt) filt = 0.8;
  size = 4*M_PI*max / filt;
  /* guard against the size+1 allocation below wrapping to zero */
  if (size == UINT_MAX) size--;
  curve = (short *) calloc (size+1, sizeof *curve);
  merror (curve, "foveon_make_curve()");
  curve[0] = size;
  for (i=0; i < size; i++) {
    x = i*filt/max/4;
    curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5;
  }
  return curve;
}

/* Build one curve per color channel; all three share the largest
   dq/div ratio as their common domain. */
void CLASS foveon_make_curves
	(short **curvep, float dq[3], float div[3], float filt)
{
  double mul[3], max=0;
  int c;

  FORC3 mul[c] = dq[c]/div[c];
  FORC3 if (max < mul[c]) max = mul[c];
  FORC3 curvep[c] = foveon_make_curve (max, mul[c], filt);
}

/* Apply a curve made above to a signed value: inputs beyond the curve's
   domain map to 0, and the sign of 'i' selects the lookup direction. */
int CLASS foveon_apply_curve (short *curve, int i)
{
  if (abs(i) >= curve[0]) return 0;
  return i < 0 ?
-curve[1-i] : curve[1+i]; } #define image ((short (*)[4]) image) void CLASS foveon_interpolate() { static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 }; short *pix, prev[3], *curve[8], (*shrink)[3]; float cfilt=0, ddft[3][3][2], ppm[3][3][3]; float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3]; float chroma_dq[3], color_dq[3], diag[3][3], div[3]; float (*black)[3], (*sgain)[3], (*sgrow)[3]; float fsum[3], val, frow, num; int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit; int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3]; int work[3][3], smlast, smred, smred_p=0, dev[3]; int satlev[3], keep[4], active[4]; unsigned dim[3], *badpix; double dsum=0, trsum[3]; char str[128]; const char* cp; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Foveon interpolation...\n")); #endif foveon_load_camf(); foveon_fixed (dscr, 4, "DarkShieldColRange"); foveon_fixed (ppm[0][0], 27, "PostPolyMatrix"); foveon_fixed (satlev, 3, "SaturationLevel"); foveon_fixed (keep, 4, "KeepImageArea"); foveon_fixed (active, 4, "ActiveImageArea"); foveon_fixed (chroma_dq, 3, "ChromaDQ"); foveon_fixed (color_dq, 3, foveon_camf_param ("IncludeBlocks", "ColorDQ") ? "ColorDQ" : "ColorDQCamRGB"); if (foveon_camf_param ("IncludeBlocks", "ColumnFilter")) foveon_fixed (&cfilt, 1, "ColumnFilter"); memset (ddft, 0, sizeof ddft); if (!foveon_camf_param ("IncludeBlocks", "DarkDrift") || !foveon_fixed (ddft[1][0], 12, "DarkDrift")) for (i=0; i < 2; i++) { foveon_fixed (dstb, 4, i ? 
"DarkShieldBottom":"DarkShieldTop"); for (row = dstb[1]; row <= dstb[3]; row++) for (col = dstb[0]; col <= dstb[2]; col++) FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c]; FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1); } if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2))) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2); #endif return; } foveon_fixed (cam_xyz, 9, cp); foveon_fixed (correct, 9, foveon_camf_param ("WhiteBalanceCorrections", model2)); memset (last, 0, sizeof last); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j]; #define LAST(x,y) last[(i+x)%3][(c+y)%3] for (i=0; i < 3; i++) FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1); #undef LAST FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583; sprintf (str, "%sRGBNeutral", model2); if (foveon_camf_param ("IncludeBlocks", str)) foveon_fixed (div, 3, str); num = 0; FORC3 if (num < div[c]) num = div[c]; FORC3 div[c] /= num; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j]; FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2]; dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20; for (i=0; i < 3; i++) FORC3 last[i][c] = trans[i][c] * dsum / trsum[i]; memset (trans, 0, sizeof trans); for (i=0; i < 3; i++) for (j=0; j < 3; j++) FORC3 trans[i][j] += (i==c ? 
32 : -1) * last[c][j] / 30; foveon_make_curves (curve, color_dq, div, cfilt); FORC3 chroma_dq[c] /= 3; foveon_make_curves (curve+3, chroma_dq, div, cfilt); FORC3 dsum += chroma_dq[c] / div[c]; curve[6] = foveon_make_curve (dsum, dsum, cfilt); curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt); sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain"); if (!sgain) return; sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow); sgx = (width + dim[1]-2) / (dim[1]-1); black = (float (*)[3]) calloc (height, sizeof *black); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); FORC3 black[row][c] = ( foveon_avg (image[row*width]+c, dscr[0], cfilt) + foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3 - ddft[0][c][0] ) / 4 - ddft[0][c][1]; } memcpy (black, black+8, sizeof *black*8); memcpy (black+height-11, black+height-22, 11*sizeof *black); memcpy (last, black, sizeof last); for (row=1; row < height-1; row++) { FORC3 if (last[1][c] > last[0][c]) { if (last[1][c] > last[2][c]) black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c]; } else if (last[1][c] < last[2][c]) black[row][c] = (last[0][c] < last[2][c]) ? 
last[0][c]:last[2][c]; memmove (last, last+1, 2*sizeof last[0]); memcpy (last[2], black[row+1], sizeof last[2]); } FORC3 black[row][c] = (last[0][c] + last[1][c])/2; FORC3 black[0][c] = (black[1][c] + black[3][c])/2; val = 1 - exp(-1/24.0); memcpy (fsum, black, sizeof fsum); for (row=1; row < height; row++) FORC3 fsum[c] += black[row][c] = (black[row][c] - black[row-1][c])*val + black[row-1][c]; memcpy (last[0], black[height-1], sizeof last[0]); FORC3 fsum[c] /= height; for (row = height; row--; ) FORC3 last[0][c] = black[row][c] = (black[row][c] - fsum[c] - last[0][c])*val + last[0][c]; memset (total, 0, sizeof total); for (row=2; row < height; row+=4) for (col=2; col < width; col+=4) { FORC3 total[c] += (short) image[row*width+col][c]; total[3]++; } for (row=0; row < height; row++) FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0); for (row=0; row < height; row++) { for (i=0; i < 6; i++) ddft[0][0][i] = ddft[1][0][i] + row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]); pix = image[row*width]; memcpy (prev, pix, sizeof prev); frow = row / (height-1.0) * (dim[2]-1); if ((irow = frow) == dim[2]-1) irow--; frow -= irow; for (i=0; i < dim[1]; i++) FORC3 sgrow[i][c] = sgain[ irow *dim[1]+i][c] * (1-frow) + sgain[(irow+1)*dim[1]+i][c] * frow; for (col=0; col < width; col++) { FORC3 { diff = pix[c] - prev[c]; prev[c] = pix[c]; ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt - ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5) - black[row][c] ); } FORC3 { work[0][c] = ipix[c] * ipix[c] >> 14; work[2][c] = ipix[c] * work[0][c] >> 14; work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14; } FORC3 { for (val=i=0; i < 3; i++) for ( j=0; j < 3; j++) val += ppm[c][i][j] * work[i][j]; ipix[c] = floor ((ipix[c] + floor(val)) * ( sgrow[col/sgx ][c] * (sgx - col%sgx) + sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]); if (ipix[c] > 32000) ipix[c] = 32000; pix[c] = ipix[c]; } pix += 4; } } free (black); free (sgrow); free (sgain); if ((badpix = 
(unsigned int *) foveon_camf_matrix (dim, "BadPixels"))) { for (i=0; i < dim[0]; i++) { col = (badpix[i] >> 8 & 0xfff) - keep[0]; row = (badpix[i] >> 20 ) - keep[1]; if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3) continue; memset (fsum, 0, sizeof fsum); for (sum=j=0; j < 8; j++) if (badpix[i] & (1 << j)) { FORC3 fsum[c] += (short) image[(row+hood[j*2])*width+col+hood[j*2+1]][c]; sum++; } if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum; } free (badpix); } /* Array for 5x5 Gaussian averaging of red values */ smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow); merror (smrow[6], "foveon_interpolate()"); for (i=0; i < 5; i++) smrow[i] = smrow[6] + i*width; /* Sharpen the reds against these Gaussian averages */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { smrow[4][col][0] = (pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { smred = ( 6 * smrow[2][col][0] + 4 * (smrow[1][col][0] + smrow[3][col][0]) + smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4; if (col == 2) smred_p = smred; i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3); if (i > 32000) i = 32000; pix[0] = i; smred_p = smred; pix += 4; } } /* Adjust the brighter pixels for better linearity */ min = 0xffff; FORC3 { i = satlev[c] / div[c]; if (min > i) min = i; } limit = min * 9 >> 4; for (pix=image[0]; pix < image[height*width]; pix+=4) { if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit) continue; min = max = pix[0]; for (c=1; c < 3; c++) { if (min > pix[c]) min = pix[c]; if (max < pix[c]) max = pix[c]; } if (min >= limit*2) { pix[0] = pix[1] = pix[2] = max; } else { i = 0x4000 - ((min - limit) << 14) / limit; i = 0x4000 - (i*i >> 14); i = i*i >> 14; FORC3 pix[c] += (max - pix[c]) * i >> 14; } } /* Because photons that miss one detector often hit 
another, the sum R+G+B is much less noisy than the individual colors. So smooth the hues without smoothing the total. */ for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] - ((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2)); sum = (dev[0] + dev[1] + dev[2]) >> 3; FORC3 pix[c] += dev[c] - sum; pix += 4; } } for (smlast=-1, row=2; row < height-2; row++) { while (smlast < row+2) { for (i=0; i < 6; i++) smrow[(i+5) % 6] = smrow[i]; pix = image[++smlast*width+2]; for (col=2; col < width-2; col++) { FORC3 smrow[4][col][c] = (pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2; pix += 4; } } pix = image[row*width+2]; for (col=2; col < width-2; col++) { for (total[3]=375, sum=60, c=0; c < 3; c++) { for (total[c]=i=0; i < 5; i++) total[c] += smrow[i][col][c]; total[3] += total[c]; sum += pix[c]; } if (sum < 0) sum = 0; j = total[3] > 375 ? 
(sum << 16) / total[3] : sum * 174; FORC3 pix[c] += foveon_apply_curve (curve[6], ((j*total[c] + 0x8000) >> 16) - pix[c]); pix += 4; } } /* Transform the image to a different colorspace */ for (pix=image[0]; pix < image[height*width]; pix+=4) { FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]); sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2; FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum); FORC3 { for (dsum=i=0; i < 3; i++) dsum += trans[c][i] * pix[i]; if (dsum < 0) dsum = 0; if (dsum > 24000) dsum = 24000; ipix[c] = dsum + 0.5; } FORC3 pix[c] = ipix[c]; } /* Smooth the image bottom-to-top and save at 1/4 scale */ shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink); merror (shrink, "foveon_interpolate()"); for (row = height/4; row--; ) for (col=0; col < width/4; col++) { ipix[0] = ipix[1] = ipix[2] = 0; for (i=0; i < 4; i++) for (j=0; j < 4; j++) FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c]; FORC3 if (row+2 > height/4) shrink[row*(width/4)+col][c] = ipix[c] >> 4; else shrink[row*(width/4)+col][c] = (shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12; } /* From the 1/4-scale image, smooth right-to-left */ for (row=0; row < (height & ~3); row++) { ipix[0] = ipix[1] = ipix[2] = 0; if ((row & 3) == 0) for (col = width & ~3 ; col--; ) FORC3 smrow[0][col][c] = ipix[c] = (shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Then smooth left-to-right */ ipix[0] = ipix[1] = ipix[2] = 0; for (col=0; col < (width & ~3); col++) FORC3 smrow[1][col][c] = ipix[c] = (smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13; /* Smooth top-to-bottom */ if (row == 0) memcpy (smrow[2], smrow[1], sizeof **smrow * width); else for (col=0; col < (width & ~3); col++) FORC3 smrow[2][col][c] = (smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13; /* Adjust the chroma toward the smooth values */ for (col=0; col < (width & ~3); col++) { for (i=j=30, c=0; c < 3; c++) { i += smrow[2][col][c]; j += image[row*width+col][c]; } j 
= (j << 16) / i; for (sum=c=0; c < 3; c++) { ipix[c] = foveon_apply_curve (curve[c+3], ((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]); sum += ipix[c]; } sum >>= 3; FORC3 { i = image[row*width+col][c] + ipix[c] - sum; if (i < 0) i = 0; image[row*width+col][c] = i; } } } free (shrink); free (smrow[6]); for (i=0; i < 8; i++) free (curve[i]); /* Trim off the black border */ active[1] -= keep[1]; active[3] -= 2; i = active[2] - active[0]; for (row=0; row < active[3]-active[1]; row++) memcpy (image[row*i], image[(row+active[1])*width+active[0]], i * sizeof *image); width = i; height = row; } #undef image /* RESTRICTED code ends here */ //@out COMMON void CLASS crop_masked_pixels() { int row, col; unsigned #ifndef LIBRAW_LIBRARY_BUILD r, raw_pitch = raw_width*2, c, m, mblack[8], zero, val; #else c, m, zero, val; #define mblack imgdata.color.black_stat #endif #ifndef LIBRAW_LIBRARY_BUILD if (load_raw == &CLASS phase_one_load_raw || load_raw == &CLASS phase_one_load_raw_c) phase_one_correct(); if (fuji_width) { for (row=0; row < raw_height-top_margin*2; row++) { for (col=0; col < fuji_width << !fuji_layout; col++) { if (fuji_layout) { r = fuji_width - 1 - col + (row >> 1); c = col + ((row+1) >> 1); } else { r = fuji_width - 1 + row - (col >> 1); c = row + ((col+1) >> 1); } if (r < height && c < width) BAYER(r,c) = RAW(row+top_margin,col+left_margin); } } } else { for (row=0; row < height; row++) for (col=0; col < width; col++) BAYER2(row,col) = RAW(row+top_margin,col+left_margin); } #endif if (mask[0][3] > 0) goto mask_set; if (load_raw == &CLASS canon_load_raw || load_raw == &CLASS lossless_jpeg_load_raw) { mask[0][1] = mask[1][1] += 2; mask[0][3] -= 2; goto sides; } if (load_raw == &CLASS canon_600_load_raw || load_raw == &CLASS sony_load_raw || (load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) || load_raw == &CLASS kodak_262_load_raw || (load_raw == &CLASS packed_load_raw && (load_flags & 32))) { sides: mask[0][0] = mask[1][0] = 
	top_margin;
  /* tail of crop_masked_pixels(): finish describing the masked (optically
     shielded) border areas, then accumulate per-channel black levels */
  mask[0][2] = mask[1][2] = top_margin+height;
  mask[0][3] += left_margin;
  mask[1][1] += left_margin+width;
  mask[1][3] += raw_width;
  }
  if (load_raw == &CLASS nokia_load_raw) {
    mask[0][2] = top_margin;
    mask[0][3] = width;
  }
mask_set:
  memset (mblack, 0, sizeof mblack);
  for (zero=m=0; m < 8; m++)
    for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
      for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
	c = FC(row-top_margin,col-left_margin);
	/* mblack[0..3] sum the masked samples per CFA color,
	   mblack[4..7] count them */
	mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
	mblack[4+c]++;
	zero += !val;
      }
  if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
    black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
	    (mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
    canon_600_correct();
#endif
  } else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) {
    /* only trust the masked areas when they are not dominated by zeros */
    FORC4 cblack[c] = mblack[c] / mblack[4+c];
    cblack[4] = cblack[5] = cblack[6] = 0;
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif

/*
   Replace isolated zero-valued pixels with the average of same-colored
   neighbors within a 5x5 window.
 */
void CLASS remove_zeroes()
{
  unsigned row, col, tot, n, r, c;

#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
  for (row=0; row < height; row++)
    for (col=0; col < width; col++)
      if (BAYER(row,col) == 0) {
	tot = n = 0;
	for (r = row-2; r <= row+2; r++)
	  for (c = col-2; c <= col+2; c++)
	    /* r and c are unsigned, so the < tests also reject the
	       wrapped-around values near the top/left edges */
	    if (r < height && c < width &&
		FC(r,c) == FC(row,col) && BAYER(r,c))
	      tot += (n++,BAYER(r,c));
	if (n) BAYER(row,col) = tot/n;
      }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
//@end COMMON

/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO */
// @out FILEIO
/*
   Search from the current directory up to the root looking for
   a ".badpixels" file, and fix those pixels now.
*/ void CLASS bad_pixels (const char *cfname) { FILE *fp=NULL; #ifndef LIBRAW_LIBRARY_BUILD char *fname, *cp, line[128]; int len, time, row, col, r, c, rad, tot, n, fixed=0; #else char *cp, line[128]; int time, row, col, r, c, rad, tot, n; #ifdef DCRAW_VERBOSE int fixed = 0; #endif #endif if (!filters) return; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2); #endif if (cfname) fp = fopen (cfname, "r"); // @end FILEIO else { for (len=32 ; ; len *= 2) { fname = (char *) malloc (len); if (!fname) return; if (getcwd (fname, len-16)) break; free (fname); if (errno != ERANGE) return; } #if defined(WIN32) || defined(DJGPP) if (fname[1] == ':') memmove (fname, fname+2, len-2); for (cp=fname; *cp; cp++) if (*cp == '\\') *cp = '/'; #endif cp = fname + strlen(fname); if (cp[-1] == '/') cp--; while (*fname == '/') { strcpy (cp, "/.badpixels"); if ((fp = fopen (fname, "r"))) break; if (cp == fname) break; while (*--cp != '/'); } free (fname); } // @out FILEIO if (!fp) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP; #endif return; } while (fgets (line, 128, fp)) { cp = strchr (line, '#'); if (cp) *cp = 0; if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue; if ((unsigned) col >= width || (unsigned) row >= height) continue; if (time > timestamp) continue; for (tot=n=0, rad=1; rad < 3 && n==0; rad++) for (r = row-rad; r <= row+rad; r++) for (c = col-rad; c <= col+rad; c++) if ((unsigned) r < height && (unsigned) c < width && (r != row || c != col) && fcol(r,c) == fcol(row,col)) { tot += BAYER2(r,c); n++; } BAYER2(row,col) = tot/n; #ifdef DCRAW_VERBOSE if (verbose) { if (!fixed++) fprintf (stderr,_("Fixed dead pixels at:")); fprintf (stderr, " %d,%d", col, row); } #endif } #ifdef DCRAW_VERBOSE if (fixed) fputc ('\n', stderr); #endif fclose (fp); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2); #endif } void CLASS subtract (const char *fname) { FILE *fp; int dim[3]={0,0,0}, comment=0, 
number=0, error=0, nd=0, c, row, col; ushort *pixel; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2); #endif if (!(fp = fopen (fname, "rb"))) { #ifdef DCRAW_VERBOSE perror (fname); #endif #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE; #endif return; } if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1; while (!error && nd < 3 && (c = fgetc(fp)) != EOF) { if (c == '#') comment = 1; if (c == '\n') comment = 0; if (comment) continue; if (isdigit(c)) number = 1; if (number) { if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0'; else if (isspace(c)) { number = 0; nd++; } else error = 1; } } if (error || nd < 3) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s is not a valid PGM file!\n"), fname); #endif fclose (fp); return; } else if (dim[0] != width || dim[1] != height || dim[2] != 65535) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s has the wrong dimensions!\n"), fname); #endif #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM; #endif fclose (fp); return; } pixel = (ushort *) calloc (width, sizeof *pixel); merror (pixel, "subtract()"); for (row=0; row < height; row++) { fread (pixel, 2, width, fp); for (col=0; col < width; col++) BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0); } free (pixel); fclose (fp); memset (cblack, 0, sizeof cblack); black = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2); #endif } //@end FILEIO //@out COMMON static const uchar xlat[2][256] = { { 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d, 0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d, 0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f, 0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f, 0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1, 
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17, 0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89, 0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f, 0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b, 0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb, 0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3, 0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f, 0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35, 0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43, 0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5, 0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 }, { 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c, 0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34, 0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad, 0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05, 0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee, 0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d, 0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b, 0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b, 0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc, 0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33, 0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8, 0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6, 0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c, 
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49, 0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb, 0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } }; void CLASS gamma_curve (double pwr, double ts, int mode, int imax) { int i; double g[6], bnd[2]={0,0}, r; g[0] = pwr; g[1] = ts; g[2] = g[3] = g[4] = 0; bnd[g[1] >= 1] = 1; if (g[1] && (g[1]-1)*(g[0]-1) <= 0) { for (i=0; i < 48; i++) { g[2] = (bnd[0] + bnd[1])/2; if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2]; else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2]; } g[3] = g[2] / g[1]; if (g[0]) g[4] = g[2] * (1/g[0] - 1); } if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) + (1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1; else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1 - g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1; if (!mode--) { memcpy (gamm, g, sizeof gamm); return; } for (i=0; i < 0x10000; i++) { curve[i] = 0xffff; if ((r = (double) i / imax) < 1) curve[i] = 0x10000 * ( mode ? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1)) : (r < g[2] ? r/g[1] : (g[0] ? 
	    pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
  }  /* end of gamma_curve() lookup-table fill */
}

/*
   Compute out = pseudoinverse of the size x 3 matrix 'in':
   builds the augmented system [in^T*in | I], reduces it by Gauss-Jordan
   elimination, then multiplies the resulting inverse back by 'in'.
   No pivoting -- assumes in^T*in is well-conditioned.
 */
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
  double work[3][6], num;
  int i, j, k;

  for (i=0; i < 3; i++) {
    for (j=0; j < 6; j++)
      work[i][j] = j == i+3;
    for (j=0; j < 3; j++)
      for (k=0; k < size; k++)
	work[i][j] += in[k][i] * in[k][j];
  }
  for (i=0; i < 3; i++) {
    num = work[i][i];
    for (j=0; j < 6; j++)
      work[i][j] /= num;
    for (k=0; k < 3; k++) {
      if (k==i) continue;
      num = work[k][i];
      for (j=0; j < 6; j++)
	work[k][j] -= work[i][j] * num;
    }
  }
  for (i=0; i < size; i++)
    for (j=0; j < 3; j++)
      for (out[i][j]=k=0; k < 3; k++)
	out[i][j] += work[j][k+3] * in[i][k];
}

/*
   Derive the camera-RGB -> output-RGB matrix (_rgb_cam) from a
   camera -> XYZ matrix.  Each row of cam_rgb is normalized so that white
   (1,1,1) maps to itself, with pre_mul[] picking up the reciprocal row
   sums as white-balance multipliers; near-zero rows fall back to zero
   coefficients with a unit multiplier.
 */
void CLASS cam_xyz_coeff (float _rgb_cam[3][4], double cam_xyz[4][3])
{
  double cam_rgb[4][3], inverse[4][3], num;
  int i, j, k;

  for (i=0; i < colors; i++)		/* Multiply out XYZ colorspace */
    for (j=0; j < 3; j++)
      for (cam_rgb[i][j] = k=0; k < 3; k++)
	cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];

  for (i=0; i < colors; i++) {		/* Normalize cam_rgb so that */
    for (num=j=0; j < 3; j++)		/* cam_rgb * (1,1,1) is (1,1,1,1) */
      num += cam_rgb[i][j];
    if(num > 0.00001)
      {
	for (j=0; j < 3; j++)
	  cam_rgb[i][j] /= num;
	pre_mul[i] = 1 / num;
      }
    else
      {
	for (j=0; j < 3; j++)
	  cam_rgb[i][j] = 0.0;
	pre_mul[i] = 1.0;
      }
  }
  pseudoinverse (cam_rgb, inverse, colors);
  for (i=0; i < 3; i++)
    for (j=0; j < colors; j++)
      _rgb_cam[i][j] = inverse[j][i];
}

#ifdef COLORCHECK
/*
   Calibrate cam_xyz from a photographed GretagMacbeth ColorChecker chart.
   The square coordinates in cut[] must be filled in by hand before
   compiling with -DCOLORCHECK.
 */
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
  int cut[NSQ][4];			// you must set these
// ColorChecker Chart under 6500-kelvin illumination
  static const double gmb_xyY[NSQ][3] = {
    { 0.400, 0.350, 10.1 },		// Dark Skin
    { 0.377, 0.345, 35.8 },		// Light Skin
    { 0.247, 0.251, 19.3 },		// Blue Sky
    { 0.337, 0.422, 13.3 },		// Foliage
    { 0.265, 0.240, 24.3 },		// Blue Flower
    { 0.261, 0.343, 43.1 },		// Bluish Green
    { 0.506, 0.407, 30.1 },		// Orange
    { 0.211, 0.175, 12.0 },		// Purplish Blue
    { 0.453, 0.306, 19.8 },		// Moderate Red
    {
0.285, 0.202, 6.6 },		// Purple
    { 0.380, 0.489, 44.3 },	// Yellow Green
    { 0.473, 0.438, 43.1 },	// Orange Yellow
    { 0.187, 0.129, 6.1 },	// Blue
    { 0.305, 0.478, 23.4 },	// Green
    { 0.539, 0.313, 12.0 },	// Red
    { 0.448, 0.470, 59.1 },	// Yellow
    { 0.364, 0.233, 19.8 },	// Magenta
    { 0.196, 0.252, 19.8 },	// Cyan
    { 0.310, 0.316, 90.0 },	// White
    { 0.310, 0.316, 59.1 },	// Neutral 8
    { 0.310, 0.316, 36.2 },	// Neutral 6.5
    { 0.310, 0.316, 19.8 },	// Neutral 5
    { 0.310, 0.316, 9.0 },	// Neutral 3.5
    { 0.310, 0.316, 3.1 } };	// Black
  double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
  double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
  int c, i, j, k, sq, row, col, pass, count[4];

  /* Average the raw values inside each chart square (per CFA color),
     darken the square in the image for visual feedback, and convert the
     reference xyY coordinates to XYZ. */
  memset (gmb_cam, 0, sizeof gmb_cam);
  for (sq=0; sq < NSQ; sq++) {
    FORCC count[c] = 0;
    for   (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
      for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
	c = FC(row,col);
	if (c >= colors) c -= 2;
	gmb_cam[sq][c] += BAYER2(row,col);
	BAYER2(row,col) = black + (BAYER2(row,col)-black)/2;
	count[c]++;
      }
    FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
    gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
    gmb_xyz[sq][1] = gmb_xyY[sq][2];
    gmb_xyz[sq][2] = gmb_xyY[sq][2] *
		(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
  }
  /* Least-squares solve camera->XYZ, two passes so the white balance
     (from patch 20, a neutral square) can be folded back in. */
  pseudoinverse (gmb_xyz, inverse, NSQ);
  for (pass=0; pass < 2; pass++) {
    for (raw_color = i=0; i < colors; i++)
      for (j=0; j < 3; j++)
	for (cam_xyz[i][j] = k=0; k < NSQ; k++)
	  cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
    cam_xyz_coeff (rgb_cam, cam_xyz);
    FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
    for (sq=0; sq < NSQ; sq++)
      FORCC gmb_cam[sq][c] *= balance[c];
  }
  if (verbose) {
    /* Emit a table entry in the format used by adobe_coeff() */
    printf ("    { \"%s %s\", %d,\n\t{", make, model, black);
    num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
    FORCC for (j=0; j < 3; j++)
      printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
    puts (" } },");
  }
#undef NSQ
}
#endif

/*
   One level of an a-trous ("with holes") wavelet transform: low-pass the
   strided sequence base[0], base[st], ... into temp using the kernel
   (1,2,1)/4 at spacing sc, mirroring at both ends.
 */
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
  int i;
  for (i=0; i < sc; i++)
    temp[i] = 2*base[st*i] + base[st*(sc-i)] + base[st*(i+sc)];
  for (; i+sc < size; i++)
    temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(i+sc)];
  for (; i < size; i++)
    temp[i] = 2*base[st*i] + base[st*(i-sc)] + base[st*(2*size-2-(i+sc))];
}

#if !defined(LIBRAW_USE_OPENMP)
/*
   Wavelet denoising: soft-threshold the detail coefficients of a 5-level
   a-trous decomposition of each color plane, then (for 3-color Bayer data)
   pull the two green channels toward each other.
 */
void CLASS wavelet_denoise()
{
  float *fimg=0, *temp, thold, mul[2], avg, diff;
  int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  static const float noise[] =
  { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };

#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif

  /* Scale the data up to fill the 16-bit range before taking sqrt */
  while (maximum << scale < 0x10000) scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  if ((size = iheight*iwidth) < 0x15550000)
    fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
  merror (fimg, "wavelet_denoise()");
  temp = fimg + size*3;
  if ((nc = colors) == 3 && filters) nc++;
  FORC(nc) {			/* denoise R,G1,B,G3 individually */
    /* sqrt() is a variance-stabilizing transform for photon noise */
    for (i=0; i < size; i++)
      fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
    for (hpass=lev=0; lev < 5; lev++) {
      lpass = size*((lev & 1)+1);
      for (row=0; row < iheight; row++) {
	hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
	for (col=0; col < iwidth; col++)
	  fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
      }
      for (col=0; col < iwidth; col++) {
	hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
	for (row=0; row < iheight; row++)
	  fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
      }
      /* Soft-threshold the detail (high-pass) band for this level */
      thold = threshold * noise[lev];
      for (i=0; i < size; i++) {
	fimg[hpass+i] -= fimg[lpass+i];
	if	(fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
	else if (fimg[hpass+i] >  thold) fimg[hpass+i] -= thold;
	else	 fimg[hpass+i] = 0;
	if (hpass) fimg[i] += fimg[hpass+i];
      }
      hpass = lpass;
    }
    for (i=0; i < size; i++)
      image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
  }
  if (filters && colors == 3) {  /* pull G1 and G3 closer together */
    for (row=0; row < 2; row++) {
      mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
      blk[row] = cblack[FC(row,0) | 1];
    }
    /* window[] is a 4-row rolling window over the green values */
    for (i=0; i < 4; i++)
      window[i] = (ushort *) fimg + width*i;
    for (wlast=-1, row=1; row < height-1; row++) {
      while (wlast < row+1) {
	for (wlast++, i=0; i < 4; i++)
	  window[(i+3) & 3] = window[i];
	for (col = FC(wlast,1) & 1; col < width; col+=2)
	  window[2][col] = BAYER(wlast,col);
      }
      thold = threshold/512;
      for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
	avg = ( window[0][col-1] + window[0][col+1] +
		window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
	      * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
	avg = avg < 0 ? 0 : sqrt(avg);
	diff = sqrt((double)BAYER(row,col)) - avg;
	if      (diff < -thold) diff += thold;
	else if (diff >  thold) diff -= thold;
	else diff = 0;
	BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
      }
    }
  }
  free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
/*
   OpenMP variant of wavelet_denoise(): same algorithm, with the per-plane
   loops split across threads (each thread gets its own scratch buffer).
 */
void CLASS wavelet_denoise()
{
  float *fimg=0, *temp, thold, mul[2], avg, diff;
  int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  static const float noise[] =
  { 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };

#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif

  while (maximum << scale < 0x10000) scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  if ((size = iheight*iwidth) < 0x15550000)
    fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
  merror (fimg, "wavelet_denoise()");
  temp = fimg + size*3;
  if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
  {
    /* per-thread scratch row/column buffer */
    temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
    FORC(nc) {			/* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
	fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
      for (hpass=lev=0; lev < 5; lev++) {
	lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
	for (row=0; row < iheight; row++) {
	  hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
	  for (col=0; col < iwidth; col++)
	    fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
	}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
	for (col=0; col < iwidth; col++) {
	  hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
	  for (row=0; row < iheight; row++)
	    fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
	}
	thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
	for (i=0; i < size; i++) {
	  fimg[hpass+i] -= fimg[lpass+i];
	  if	  (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
	  else if (fimg[hpass+i] >  thold) fimg[hpass+i] -= thold;
	  else	   fimg[hpass+i] = 0;
	  if (hpass) fimg[i] += fimg[hpass+i];
	}
	hpass = lpass;
      }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i=0; i < size; i++)
	image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
    }
    free(temp);
  } /* end omp parallel */
  /* The following loops are hard to parallelize:
   * the first part carries a dependency through wlast;
   * the second part should be easier, but has not been done yet.
*/
  if (filters && colors == 3) {  /* pull G1 and G3 closer together */
    for (row=0; row < 2; row++){
      mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
      blk[row] = cblack[FC(row,0) | 1];
    }
    /* window[] is a 4-row rolling window over the green values */
    for (i=0; i < 4; i++)
      window[i] = (ushort *) fimg + width*i;
    for (wlast=-1, row=1; row < height-1; row++) {
      while (wlast < row+1) {
	for (wlast++, i=0; i < 4; i++)
	  window[(i+3) & 3] = window[i];
	for (col = FC(wlast,1) & 1; col < width; col+=2)
	  window[2][col] = BAYER(wlast,col);
      }
      thold = threshold/512;
      for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
	avg = ( window[0][col-1] + window[0][col+1] +
		window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
	      * mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
	avg = avg < 0 ? 0 : sqrt(avg);
	diff = sqrt((double)BAYER(row,col)) - avg;
	if      (diff < -thold) diff += thold;
	else if (diff >  thold) diff -= thold;
	else diff = 0;
	BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
      }
    }
  }
  free (fimg);
}
#endif

// green equilibration
/*
   Equalize the two green channels (G at [1] vs G at [3]) of a 4-color
   Bayer image: where the local green neighborhoods are flat, rescale G3
   by the ratio of the surrounding G1/G3 means.
 */
void CLASS green_matching()
{
  int i,j;
  double m1,m2,c1,c2;
  int o1_1,o1_2,o1_3,o1_4;
  int o2_1,o2_2,o2_3,o2_4;
  ushort (*img)[4];
  const int margin = 3;
  int oj = 2, oi = 2;
  float f;
  const float thr = 0.01f;
  if(half_size || shrink) return;
  /* locate a G3 site near the top-left corner */
  if(FC(oj, oi) != 3) oj++;
  if(FC(oj, oi) != 3) oi++;
  if(FC(oj, oi) != 3) oj--;

  /* work from an unmodified copy so already-adjusted pixels don't feed back */
  img = (ushort (*)[4]) calloc (height*width, sizeof *image);
  merror (img, "green_matching()");
  memcpy(img,image,height*width*sizeof *image);

  for(j=oj;j<height-margin;j+=2)
    for(i=oi;i<width-margin;i+=2){
      o1_1=img[(j-1)*width+i-1][1];
      o1_2=img[(j-1)*width+i+1][1];
      o1_3=img[(j+1)*width+i-1][1];
      o1_4=img[(j+1)*width+i+1][1];
      o2_1=img[(j-2)*width+i][3];
      o2_2=img[(j+2)*width+i][3];
      o2_3=img[j*width+i-2][3];
      o2_4=img[j*width+i+2][3];

      m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
      m2=(o2_1+o2_2+o2_3+o2_4)/4.0;

      /* mean pairwise spread = local contrast; only adjust flat areas */
      c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
      c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
      if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
      {
        f = image[j*width+i][3]*m1/m2;
        image[j*width+i][3]=f>0xffff?0xffff:f;
      }
    }
  free(img);
}

/*
   Compute the white-balance multipliers (auto, camera, or user supplied),
   subtract the black level, scale every pixel to fill the 16-bit range,
   and optionally correct lateral chromatic aberration.
 */
void CLASS scale_colors()
{
  unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
  int val, dark, sat;
  double dsum[8], dmin, dmax;
  float scale_mul[4], fr, fc;
  ushort *img=0, *pix;

#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif

  if (user_mul[0])
    memcpy (pre_mul, user_mul, sizeof pre_mul);
  if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
    /* Auto WB: average the grey-box area in 8x8 tiles, skipping any tile
       that contains a near-saturated value */
    memset (dsum, 0, sizeof dsum);
    bottom = MIN (greybox[1]+greybox[3], height);
    right  = MIN (greybox[0]+greybox[2], width);
    for (row=greybox[1]; row < bottom; row += 8)
      for (col=greybox[0]; col < right; col += 8) {
	memset (sum, 0, sizeof sum);
	for (y=row; y < row+8 && y < bottom; y++)
	  for (x=col; x < col+8 && x < right; x++)
	    FORC4 {
	      if (filters) {
		c = fcol(y,x);
		val = BAYER2(y,x);
	      } else
		val = image[y*width+x][c];
	      if (val > maximum-25) goto skip_block;
	      if ((val -= cblack[c]) < 0) val = 0;
	      sum[c] += val;
	      sum[c+4]++;
	      if (filters) break;
	    }
	FORC(8) dsum[c] += sum[c];
skip_block: ;
      }
    FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
  }
  if (use_camera_wb && cam_mul[0] != -1) {
    /* Camera WB: prefer the white-card samples, else cam_mul */
    memset (sum, 0, sizeof sum);
    for (row=0; row < 8; row++)
      for (col=0; col < 8; col++) {
	c = FC(row,col);
	if ((val = white[row][col] - cblack[c]) > 0)
	  sum[c] += val;
	sum[c+4]++;
      }
#ifdef LIBRAW_LIBRARY_BUILD
    if(load_raw == &LibRaw::nikon_load_sraw)
      {
	// Nikon sRAW: camera WB already applied:
	pre_mul[0]=pre_mul[1]=pre_mul[2]=pre_mul[3]=1.0;
      }
    else
#endif
    if (sum[0] && sum[1] && sum[2] && sum[3])
      FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
    else if (cam_mul[0] && cam_mul[2])
      memcpy (pre_mul, cam_mul, sizeof pre_mul);
    else
      {
#ifdef LIBRAW_LIBRARY_BUILD
	imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
	fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
      }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  // Nikon sRAW, daylight
  if (load_raw == &LibRaw::nikon_load_sraw && !use_camera_wb && !use_auto_wb &&
      cam_mul[0] > 0.001f && cam_mul[1] > 0.001f && cam_mul[2] > 0.001f )
    {
      /* divide out the baked-in camera WB so daylight multipliers apply */
      for(c=0;c<3;c++)
	pre_mul[c]/=cam_mul[c];
    }
#endif
  if (pre_mul[1] == 0) pre_mul[1] = 1;
  if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
  dark = black;
  sat = maximum;
  if (threshold) wavelet_denoise();
  maximum -= black;
  /* Normalize the multipliers; with highlight recovery off, normalize to
     the smallest so no channel is pushed past saturation */
  for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
    if (dmin > pre_mul[c])
	dmin = pre_mul[c];
    if (dmax < pre_mul[c])
	dmax = pre_mul[c];
  }
  if (!highlight) dmax = dmin;
  FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
  if (verbose) {
    fprintf (stderr, _("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
    FORC4 fprintf (stderr, " %f", pre_mul[c]);
    fputc ('\n', stderr);
  }
#endif
  /* Fold a trivial (1x1 or 2x2-reducible) per-pattern black level into
     the per-channel black levels */
  if (filters > 1000 && (cblack[4]+1)/2 == 1 && (cblack[5]+1)/2 == 1) {
    FORC4 cblack[FC(c/2,c%2)] +=
	cblack[6 + c/2 % cblack[4] * cblack[5] + c%2 % cblack[5]];
    cblack[4] = cblack[5] = 0;
  }
  size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
  scale_colors_loop(scale_mul);
#else
  for (i=0; i < size*4; i++) {
    if (!(val = image[0][i])) continue;
    if (cblack[4] && cblack[5])
      val -= cblack[6 + i/4 / iwidth % cblack[4] * cblack[5] +
			i/4 % iwidth % cblack[5]];
    val -= cblack[i & 3];
    val *= scale_mul[i & 3];
    image[0][i] = CLIP(val);
  }
#endif
  if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
    /* Lateral CA correction: resample R and B radially about the center */
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
    for (c=0; c < 4; c+=2) {
      if (aber[c] == 1) continue;
      img = (ushort *) malloc (size * sizeof *img);
      merror (img, "scale_colors()");
      for (i=0; i < size; i++)
	img[i] = image[i][c];
      for (row=0; row < iheight; row++) {
	ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
	if (ur > iheight-2) continue;
	fr -= ur;
	for (col=0; col < iwidth; col++) {
	  uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
	  if (uc > iwidth-2) continue;
	  fc -= uc;
	  pix = img + ur*iwidth + uc;
	  /* bilinear interpolation from the unscaled copy */
	  image[row*iwidth+col][c] =
	    (pix[
0]*(1-fc) + pix[     1]*fc) * (1-fr) +
	    (pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
	}
      }
      free(img);
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}

/*
   Prepare the image for interpolation: expand a shrunken (half-size)
   buffer back to full size if needed, and for 4-color Bayer data decide
   whether to keep four colors or fold G3 into G1.
 */
void CLASS pre_interpolate()
{
  ushort (*img)[4];
  int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
  if (shrink) {
    if (half_size) {
      height = iheight;
      width  = iwidth;
      if (filters == 9) {
	/* X-Trans half-size: fill missing R/B from vertical neighbors */
	for (row=0; row < 3; row++)
	  for (col=1; col < 4; col++)
	    if (!(image[row*width+col][0] | image[row*width+col][2]))
	      goto break2;
	break2:
	for ( ; row < height; row+=3)
	  for (col=(col-1)%3+1; col < width-1; col+=3) {
	    img = image + row*width+col;
	    for (c=0; c < 3; c+=2)
	      img[0][c] = (img[-1][c] + img[1][c]) >> 1;
	  }
      }
    } else {
      /* Expand the shrunken buffer to full size (each source pixel
	 fills its CFA position in a 2x2 block) */
      img = (ushort (*)[4]) calloc (height, width*sizeof *img);
      merror (img, "pre_interpolate()");
      for (row=0; row < height; row++)
	for (col=0; col < width; col++) {
	  c = fcol(row,col);
	  img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
	}
      free (image);
      image = img;
      shrink = 0;
    }
  }
  if (filters > 1000 && colors == 3) {
    mix_green = four_color_rgb ^ half_size;
    if (four_color_rgb | half_size) colors++;
    else {
      /* Treat G3 as G1: copy the values and clear the second green
	 from the filter pattern */
      for (row = FC(1,0) >> 1; row < height; row+=2)
	for (col = FC(row,1) & 1; col < width; col+=2)
	  image[row*width+col][1] = image[row*width+col][3];
      filters &= ~((filters & 0x55555555) << 1);
    }
  }
  if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}

/*
   Fill the outer 'border' pixels by averaging whatever same-colored
   neighbors exist in each 3x3 neighborhood.  Relies on unsigned wraparound
   of row-1/col-1 to reject out-of-image neighbors via y < height, x < width.
 */
void CLASS border_interpolate (int border)
{
  unsigned row, col, y, x, f, c, sum[8];

  for (row=0; row < height; row++)
    for (col=0; col < width; col++) {
      if (col==border && row >= border && row < height-border)
	col = width-border;
      memset (sum, 0, sizeof sum);
      for (y=row-1; y != row+2; y++)
	for (x=col-1; x != col+2; x++)
	  if (y < height && x < width) {
	    f = fcol(y,x);
	    sum[f] += image[y*width+x][f];
	    sum[f+4]++;
	  }
      f = fcol(row,col);
      FORCC if (c != f && sum[c+4])
	image[row*width+col][c] = sum[c] / sum[c+4];
    }
}

/*
   Inner loop of bilinear interpolation: apply the precomputed per-pattern
   instruction stream in code[][] (offset/shift/color triples, then a
   multiplier table) to every interior pixel.
 */
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
  int row;
  for (row=1; row < height-1; row++)
    {
      int col,*ip;
      ushort *pix;
      for (col=1; col < width-1; col++)
	{
	  int i;
	  int sum[4];
	  pix = image[row*width+col];
	  ip = code[row % size][col % size];
	  memset (sum, 0, sizeof sum);
	  /* first pass: weighted sums of the neighbors */
	  for (i=*ip++; i--; ip+=3)
	    sum[ip[2]] += pix[ip[0]] << ip[1];
	  /* second pass: scale each missing color into place */
	  for (i=colors; --i; ip+=2)
	    pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
	}
    }
}

/*
   Bilinear interpolation: precompute, for each position in the CFA
   pattern, the neighbor offsets and weights, then run the loop above.
 */
void CLASS lin_interpolate()
{
  int code[16][16][32], size=16, *ip, sum[4];
  int f, c, x, y, row, col, shift, color;

#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif

  if (filters == 9) size = 6;		/* X-Trans has a 6x6 pattern */
  border_interpolate(1);
  for (row=0; row < size; row++)
    for (col=0; col < size; col++) {
      ip = code[row][col]+1;
      f = fcol(row,col);
      memset (sum, 0, sizeof sum);
      for (y=-1; y <= 1; y++)
	for (x=-1; x <= 1; x++) {
	  shift = (y==0) + (x==0);
	  color = fcol(row+y,col+x);
	  if (color == f) continue;
	  *ip++ = (width*y + x)*4 + color;
	  *ip++ = shift;
	  *ip++ = color;
	  sum[color] += 1 << shift;
	}
      code[row][col][0] = (ip - code[row][col]) / 3;
      FORCC
	if (c != f) {
	  *ip++ = c;
	  *ip++ = sum[c]>0?256 / sum[c]:0;
	}
    }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
  lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}

/*
   This algorithm is officially called:

   "Interpolation using a Threshold-based variable number of gradients"

   described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html

   I've extended the basic idea to work with non-Bayer filter arrays.
   Gradients are numbered clockwise from NW=0 to W=7.
*/
void CLASS vng_interpolate()
{
  /* terms[]: y1,x1,y2,x2,weight,gradient-mask sextets describing which
     pixel pairs contribute to which of the 8 directional gradients */
  static const signed char *cp, terms[] = {
    -2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
    -2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
    -2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
    -2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
    -2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
    -1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
    -1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40,
    -1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
    -1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
    -1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
    -1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
    -1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
    -1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
    +0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40,
    +0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
    +0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
    +0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
    +0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
    +0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
    +0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80,
    +1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
    +1,+0,+2,+1,0,0x10
  }, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
  ushort (*brow[5])[4], *pix;
  int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
  int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
  int g, diff, thold, num, c;

  lin_interpolate();		/* start from a bilinear estimate */
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif

  if (filters == 1) prow = pcol = 16;
  if (filters == 9) prow = pcol = 6;
  ip = (int *) calloc (prow*pcol, 1280);
  merror (ip, "vng_interpolate()");
  /* Compile terms[]/chood[] into a per-pattern-position instruction
     stream: gradient contributions, then the 8 averaging neighbors */
  for (row=0; row < prow; row++)		/* Precalculate for VNG */
    for (col=0; col < pcol; col++) {
      code[row][col] = ip;
      for (cp=terms, t=0; t < 64; t++) {
	y1 = *cp++;  x1 = *cp++;
	y2 = *cp++;  x2 = *cp++;
	weight = *cp++;
	grads = *cp++;
	color = fcol(row+y1,col+x1);
	if (fcol(row+y2,col+x2) != color) continue;
	diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
	if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
	*ip++ = (y1*width + x1)*4 + color;
	*ip++ = (y2*width + x2)*4 + color;
	*ip++ = weight;
	for (g=0; g < 8; g++)
	  if (grads & 1<<g) *ip++ = g;
	*ip++ = -1;
      }
      *ip++ = INT_MAX;
      for (cp=chood, g=0; g < 8; g++) {
	y = *cp++;  x = *cp++;
	*ip++ = (y*width + x) * 4;
	color = fcol(row,col);
	if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
	  *ip++ = (y*width + x) * 8 + color;
	else
	  *ip++ = 0;
      }
    }
  /* brow[0..2] is a 3-row output delay buffer so reads stay unmodified */
  brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
  merror (brow[4], "vng_interpolate()");
  for (row=0; row < 3; row++)
    brow[row] = brow[4] + row*width;
  for (row=2; row < height-2; row++) {		/* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
    if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
    for (col=2; col < width-2; col++) {
      pix = image[row*width+col];
      ip = code[row % prow][col % pcol];
      memset (gval, 0, sizeof gval);
      while ((g = ip[0]) != INT_MAX) {		/* Calculate gradients */
	diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
	gval[ip[3]] += diff;
	ip += 5;
	if ((g = ip[-1]) == -1) continue;
	gval[g] += diff;
	while ((g = *ip++) != -1)
	  gval[g] += diff;
      }
      ip++;
      gmin = gmax = gval[0];			/* Choose a threshold */
      for (g=1; g < 8; g++) {
	if (gmin > gval[g]) gmin = gval[g];
	if (gmax < gval[g]) gmax = gval[g];
      }
      if (gmax == 0) {
	memcpy (brow[2][col], pix, sizeof *image);
	continue;
      }
      thold = gmin + (gmax >> 1);
      memset (sum, 0, sizeof sum);
      color = fcol(row,col);
      for (num=g=0; g < 8; g++,ip+=2) {		/* Average the neighbors */
	if (gval[g] <= thold) {
	  FORCC
	    if (c == color && ip[1])
	      sum[c] += (pix[c] + pix[ip[1]]) >> 1;
	    else
	      sum[c] += pix[ip[0] + c];
	  num++;
	}
      }
      FORCC {					/* Save to buffer */
	t = pix[color];
	if (c != color)
	  t += (sum[c] - sum[color]) / num;
	brow[2][col][c] = CLIP(t);
      }
    }
    if (row > 3)				/* Write buffer to image */
      memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
    /* rotate the delay buffer */
    for (g=0; g < 4; g++)
      brow[(g-1) & 3] = brow[g];
  }
  memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
  memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
  free (brow[4]);
  free (code[0][0]);
}

/*
   Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
void CLASS ppg_interpolate()
{
  int dir[5] = { 1, width, -1, -width, 1 };
  int row, col, diff[2], guess[2], c, d, i;
  ushort (*pix)[4];

  border_interpolate(3);
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif

/*  Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=3; row < height-3; row++)
    for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
      pix = image + row*width+col;
      /* compare horizontal (d=1) vs vertical (d=width) estimates */
      for (i=0; (d=dir[i]) > 0; i++) {
	guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
		      - pix[-2*d][c] - pix[2*d][c];
	diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
		    ABS(pix[ 2*d][c] - pix[ 0][c]) +
		    ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
		  ( ABS(pix[ 3*d][1] - pix[ d][1]) +
		    ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
      }
      d = dir[i = diff[0] > diff[1]];
      pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
    }
/*  Calculate red and blue for each green pixel:		*/
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=1; row < height-1; row++)
    for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
      pix = image + row*width+col;
      for (i=0; (d=dir[i]) > 0; c=2-c, i++)
	pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
			- pix[-d][1] -
pix[d][1]) >> 1);
    }
/*  Calculate blue for red pixels and vice versa:		*/
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=1; row < height-1; row++)
    for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
      pix = image + row*width+col;
      /* choose between the two diagonal directions */
      for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
	diff[i] = ABS(pix[-d][c] - pix[d][c]) +
		  ABS(pix[-d][1] - pix[0][1]) +
		  ABS(pix[ d][1] - pix[0][1]);
	guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
		 - pix[-d][1] - pix[d][1];
      }
      if (diff[0] != diff[1])
	pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
      else
	pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
    }
}

/*
   Convert a camera-space pixel to scaled CIELab.  Called once with
   rgb == NULL to build the cube-root lookup table and the camera->XYZ
   matrix (stored per-thread unless LIBRAW_NOTHREADS).
 */
void CLASS cielab (ushort rgb[3], short lab[3])
{
  int c, i, j, k;
  float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
  static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
  if (!rgb) {
#ifndef LIBRAW_NOTHREADS
    if(cbrt[0] < -1.0f)
#endif
      for (i=0; i < 0x10000; i++) {
	r = i / 65535.0;
	/* f(t) of the Lab definition: cube root above 0.008856,
	   linear segment below */
	cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
      }
    for (i=0; i < 3; i++)
      for (j=0; j < colors; j++)
	for (xyz_cam[i][j] = k=0; k < 3; k++)
	  xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
    return;
  }
  xyz[0] = xyz[1] = xyz[2] = 0.5;	/* +0.5 for rounding */
  FORCC {
    xyz[0] += xyz_cam[0][c] * rgb[c];
    xyz[1] += xyz_cam[1][c] * rgb[c];
    xyz[2] += xyz_cam[2][c] * rgb[c];
  }
  xyz[0] = cbrt[CLIP((int) xyz[0])];
  xyz[1] = cbrt[CLIP((int) xyz[1])];
  xyz[2] = cbrt[CLIP((int) xyz[2])];
  /* L,a,b scaled by 64 to use the short range */
  lab[0] = 64 * (116 * xyz[1] - 16);
  lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
  lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}

#define TS 512		/* Tile Size */
#define fcol(row,col) xtrans[(row+6) % 6][(col+6) % 6]

/*
   Frank Markesteijn's algorithm for Fuji X-Trans sensors
 */
void CLASS xtrans_interpolate (int passes)
{
  int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
  int val, ndir, pass, hm[8], avg[4], color[3][8];
  static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
	patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
			{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
	dir[4] = { 1,TS,TS+1,TS-1 };
  short allhex[3][3][2][8], *hex;
  ushort min, max, sgrow, sgcol;
  ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
   short (*lab)    [TS][3], (*lix)[3];
  float (*drv)[TS][TS], diff[6], tr;
  char (*homo)[TS][TS], *buffer;

#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif

  cielab (0,0);		/* initialize the Lab tables */
  ndir = 4 << (passes > 1);
  /* One buffer carved into rgb / lab / drv / homo tile workspaces */
  buffer = (char *) malloc (TS*TS*(ndir*11+6));
  merror (buffer, "xtrans_interpolate()");
  rgb  = (ushort(*)[TS][TS][3]) buffer;
  lab  = (short (*)    [TS][3])(buffer + TS*TS*(ndir*6));
  drv  = (float (*)[TS][TS])   (buffer + TS*TS*(ndir*6+6));
  homo = (char  (*)[TS][TS])   (buffer + TS*TS*(ndir*10+6));

/* Map a green hexagon around each non-green pixel and vice versa:	*/
  for (row=0; row < 3; row++)
    for (col=0; col < 3; col++)
      for (ng=d=0; d < 10; d+=2) {
	g = fcol(row,col) == 1;
	if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
	if (ng == 4) { sgrow = row; sgcol = col; }  /* solitary green */
	if (ng == g+1) FORC(8) {
	  v = orth[d  ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
	  h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
	  allhex[row][col][0][c^(g*2 & d)] = h + v*width;  /* image offsets */
	  allhex[row][col][1][c^(g*2 & d)] = h + v*TS;     /* tile offsets */
	}
      }

/* Set green1 and green3 to the minimum and maximum allowed values:	*/
  for (row=2; row < height-2; row++)
    for (min=~(max=0), col=2; col < width-2; col++) {
      if (fcol(row,col) == 1 && (min=~(max=0))) continue;
      pix = image + row*width + col;
      hex = allhex[row % 3][col % 3][0];
      if (!max) FORC(6) {
	val = pix[hex[c]][1];
	if (min > val) min = val;
	if (max < val) max = val;
      }
      pix[0][1] = min;
      pix[0][3] = max;
      switch ((row-sgrow) % 3) {
	case 1: if (row < height-3) { row++; col--; } break;
	case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
      }
    }

  /* Process in overlapping TSxTS tiles */
  for (top=3; top < height-19; top += TS-16)
    for (left=3; left < width-19; left += TS-16) {
      mrow = MIN (top+TS, height-3);
      mcol = MIN (left+TS, width-3);
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++)
	  memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
      FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);

/* Interpolate green horizontally, vertically, and along both diagonals: */
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++) {
	  if ((f = fcol(row,col)) == 1) continue;
	  pix = image + row*width + col;
	  hex = allhex[row % 3][col % 3][0];
	  color[1][0] = 174 * (pix[  hex[1]][1] + pix[  hex[0]][1]) -
			 46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
	  color[1][1] = 223 *  pix[  hex[3]][1] + pix[  hex[2]][1] * 33 +
			 92 * (pix[      0 ][f] - pix[ -hex[2]][f]);
	  FORC(2) color[1][2+c] =
		164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
		(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
	  FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
		LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
	}

      for (pass=0; pass < passes; pass++) {
	if (pass == 1)
	  memcpy (rgb+=4, buffer, 4*sizeof *rgb);

/* Recalculate green from interpolated values of
 closer pixels:	*/
	if (pass) {
	  for (row=top+2; row < mrow-2; row++)
	    for (col=left+2; col < mcol-2; col++) {
	      if ((f = fcol(row,col)) == 1) continue;
	      pix = image + row*width + col;
	      hex = allhex[row % 3][col % 3][1];
	      for (d=3; d < 6; d++) {
		rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
		val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
		    - rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
		rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
	      }
	    }
	}

/* Interpolate red and blue values for solitary green pixels:	*/
	for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
	  for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
	    rix = &rgb[0][row-top][col-left];
	    h = fcol(row,col+1);
	    memset (diff, 0, sizeof diff);
	    for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
	      for (c=0; c < 2; c++, h^=2) {
		g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
		color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
		if (d > 1)
		  diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
				- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
	      }
	      if (d > 1 && (d & 1))
		if (diff[d-1] < diff[d])
		  FORC(2) color[c*2][d] = color[c*2][d-1];
	      if (d < 2 || (d & 1)) {
		FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
		rix += TS*TS;
	      }
	    }
	  }

/* Interpolate red for blue pixels and vice versa:	*/
	for (row=top+3; row < mrow-3; row++)
	  for (col=left+3; col < mcol-3; col++) {
	    if ((f = 2-fcol(row,col)) == 1) continue;
	    rix = &rgb[0][row-top][col-left];
	    c = (row-sgrow) % 3 ? TS:1;
	    h = 3 * (c ^ TS ^ 1);
	    for (d=0; d < 4; d++, rix += TS*TS) {
	      i = d > 1 || ((d ^ c) & 1) ||
		 ((ABS(rix[0][1]-rix[c][1])+ABS(rix[0][1]-rix[-c][1])) <
		2*(ABS(rix[0][1]-rix[h][1])+ABS(rix[0][1]-rix[-h][1]))) ? c:h;
	      rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
		  2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
	    }
	  }

/* Fill in red and blue for 2x2 blocks of green:	*/
	for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
	  for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
	    rix = &rgb[0][row-top][col-left];
	    hex = allhex[row % 3][col % 3][1];
	    for (d=0; d < ndir; d+=2, rix += TS*TS)
	      if (hex[d] + hex[d+1]) {
		g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
	      } else {
		g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
	      }
	  }
      }
      rgb = (ushort(*)[TS][TS][3]) buffer;
      mrow -= top;
      mcol -= left;

/* Convert to CIELab and differentiate in all directions:	*/
      for (d=0; d < ndir; d++) {
	for (row=2; row < mrow-2; row++)
	  for (col=2; col < mcol-2; col++)
	    cielab (rgb[d][row][col], lab[row][col]);
	for (f=dir[d & 3],row=3; row < mrow-3; row++)
	  for (col=3; col < mcol-3; col++) {
	    lix = &lab[row][col];
	    g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
	    drv[d][row][col] = SQR(g)
	      + SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
	      + SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
	  }
      }

/* Build homogeneity maps from the derivatives:	*/
      memset(homo, 0, ndir*TS*TS);
      for (row=4; row < mrow-4; row++)
	for (col=4; col < mcol-4; col++) {
	  for (tr=FLT_MAX, d=0; d < ndir; d++)
	    if (tr > drv[d][row][col])
		tr = drv[d][row][col];
	  tr *= 8;
	  for (d=0; d < ndir; d++)
	    for (v=-1; v <= 1; v++)
	      for (h=-1; h <= 1; h++)
		if (drv[d][row+v][col+h] <= tr)
		  homo[d][row][col]++;
	}

/* Average the most homogeneous pixels for the final result:	*/
      if (height-top < TS+4) mrow = height-top+2;
      if (width-left < TS+4) mcol = width-left+2;
      for (row = MIN(top,8); row < mrow-8; row++)
	for (col = MIN(left,8); col < mcol-8; col++) {
	  for (d=0; d < ndir; d++)
	    for (hm[d]=0, v=-2; v <= 2; v++)
	      for (h=-2; h <= 2; h++)
		hm[d] += homo[d][row+v][col+h];
	  for (d=0; d < ndir-4; d++)
	    if (hm[d] < hm[d+4]) hm[d  ] = 0; else
	    if (hm[d] > hm[d+4]) hm[d+4] = 0;
	  for (max=hm[0],d=1; d < ndir; d++)
	    if (max < hm[d]) max = hm[d];
	  max -= max >> 3;
	  memset (avg, 0, sizeof avg);
	  for (d=0; d < ndir; d++)
	    if (hm[d] >= max) {
	      FORC3 avg[c] += rgb[d][row][col][c];
	      avg[3]++;
	    }
	  FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
	}
    }
  free(buffer);
  border_interpolate(8);
}
#undef fcol

/*
   Adaptive Homogeneity-Directed interpolation is based on
   the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
 */
#ifdef LIBRAW_LIBRARY_BUILD

/*
   AHD step 1: for every non-green pixel in the tile, compute horizontal
   (out_rgb[0]) and vertical (out_rgb[1]) green estimates, clamped between
   the two adjacent greens in that direction.
 */
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
  int row, col;
  int c, val;
  ushort (*pix)[4];
  const int rowlimit = MIN(top+TS, height-2);
  const int collimit = MIN(left+TS, width-2);

  for (row = top; row < rowlimit; row++) {
    col = left + (FC(row,left) & 1);
    for (c = FC(row,col); col < collimit; col+=2) {
      pix = image + row*width+col;
      val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
	     - pix[-2][c] - pix[2][c]) >> 2;
      out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
      val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
	     - pix[-2*width][c] - pix[2*width][c]) >> 2;
      out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
    }
  }
}
/*
   AHD step 2 (one direction): fill in red and blue from the green plane
   of this direction and convert each pixel to CIELab.
 */
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
  unsigned row, col;
  int c, val;
  ushort (*pix)[4];
  ushort (*rix)[3];
  short (*lix)[3];
  float xyz[3];
  const unsigned num_pix_per_row = 4*width;	/* ushorts per image row */
  const unsigned rowlimit = MIN(top+TS-1, height-3);
  const unsigned collimit = MIN(left+TS-1, width-3);
  ushort *pix_above;
  ushort *pix_below;
  int t1, t2;

  for (row = top+1; row < rowlimit; row++) {
    pix = image + row*width + left;
    rix = &inout_rgb[row-top][0];
    lix = &out_lab[row-top][0];
    for (col = left+1; col < collimit; col++) {
      pix++;
      pix_above = &pix[0][0] - num_pix_per_row;
      pix_below = &pix[0][0] + num_pix_per_row;
      rix++;
      lix++;

      c = 2 - FC(row, col);

      if (c == 1) {
	/* green pixel: the missing red and blue come from the row
	   neighbors and the column neighbors respectively */
	c =
 FC(row+1,col);
	t1 = 2-c;
	val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
			     - rix[-1][1] - rix[1][1] ) >> 1);
	rix[0][t1] = CLIP(val);
	val = pix[0][1] + (( pix_above[c] + pix_below[c]
			     - rix[-TS][1] - rix[TS][1] ) >> 1);
      } else {
	t1 = -4+c; /* -4+c: pixel of color c to the left */
	t2 = 4+c;  /* 4+c: pixel of color c to the right */
	val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
			     + pix_below[t1] + pix_below[t2]
			     - rix[-TS-1][1] - rix[-TS+1][1]
			     - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
      }
      rix[0][c] = CLIP(val);
      c = FC(row,col);
      rix[0][c] = pix[0][c];
      cielab(rix[0],lix[0]);
    }
  }
}
/* AHD step 2: run the red/blue fill + Lab conversion for both directions */
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
  int direction;
  for (direction = 0; direction < 2; direction++) {
    ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[direction], out_lab[direction]);
  }
}

/*
   AHD step 3: count, for each pixel and each direction, how many of the
   four neighbors are within the adaptive L and ab thresholds (epsilons
   taken as the min over directions of the max neighbor differences).
 */
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int direction;
  int i;
  short (*lix)[3];
  short (*lixs[2])[3];
  short *adjacent_lix;
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  static const int dir[4] = { -1, 1, -TS, TS };
  const int rowlimit = MIN(top+TS-2, height-4);
  const int collimit = MIN(left+TS-2, width-4);
  int homogeneity;
  char (*homogeneity_map_p)[2];

  memset (out_homogeneity_map, 0, 2*TS*TS);

  for (row=top+2; row < rowlimit; row++) {
    tr = row-top;
    homogeneity_map_p = &out_homogeneity_map[tr][1];
    for (direction=0; direction < 2; direction++) {
      lixs[direction] = &lab[direction][tr][1];
    }

    for (col=left+2; col < collimit; col++) {
      tc = col-left;
      homogeneity_map_p++;

      for (direction=0; direction < 2; direction++) {
	lix = ++lixs[direction];
	for (i=0; i < 4; i++) {
	  adjacent_lix = lix[dir[i]];
	  ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
	  abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
	    + SQR(lix[0][2]-adjacent_lix[2]);
	}
      }

      leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
		 MAX(ldiff[1][2],ldiff[1][3]));
      abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
		  MAX(abdiff[1][2],abdiff[1][3]));
      for (direction=0; direction < 2; direction++) {
	homogeneity = 0;
	for (i=0; i < 4; i++) {
	  if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
	    homogeneity++;
	  }
	}
	homogeneity_map_p[0][direction] = homogeneity;
      }
    }
  }
}
/*
   AHD step 4: for each pixel pick the direction whose 3x3 homogeneity sum
   is larger (or average both when tied) and write it back to image[].
 */
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int i, j;
  int direction;
  int hm[2];
  int c;
  const int rowlimit = MIN(top+TS-3, height-5);
  const int collimit = MIN(left+TS-3, width-5);

  ushort (*pix)[4];
  ushort (*rix[2])[3];

  for (row=top+3; row < rowlimit; row++) {
    tr = row-top;
    pix = &image[row*width+left+2];
    for (direction = 0; direction < 2; direction++) {
      rix[direction] = &rgb[direction][tr][2];
    }

    for (col=left+3; col < collimit; col++) {
      tc = col-left;
      pix++;
      for (direction = 0; direction < 2; direction++) {
	rix[direction]++;
      }

      for (direction=0; direction < 2; direction++) {
	hm[direction] = 0;
	for (i=tr-1; i <= tr+1; i++) {
	  for (j=tc-1; j <= tc+1; j++) {
	    hm[direction] += homogeneity_map[i][j][direction];
	  }
	}
      }
      if (hm[0] != hm[1]) {
	memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
      } else {
	FORC3 {
	  pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
	}
      }
    }
  }
}
/*
   AHD driver (library build): tile the image, run steps 1-4 per tile
   (optionally across OpenMP threads, each with its own workspace), and
   honor the progress callback's cancellation request.
 */
void CLASS ahd_interpolate()
{
  int i, j, k, top, left;
  float xyz_cam[3][4],r;
  char *buffer;
  ushort (*rgb)[TS][TS][3];
  short (*lab)[TS][TS][3];
  char (*homo)[TS][2];
  int terminate_flag = 0;

  cielab(0,0);
  border_interpolate(5);

#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
  {
    buffer = (char *) malloc (26*TS*TS);		/* 1664 kB */
    merror (buffer, "ahd_interpolate()");
    rgb  = (ushort(*)[TS][TS][3]) buffer;
    lab  = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
    homo = (char  (*)[TS][2])    (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
    for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
      if(0== omp_get_thread_num())
#endif
	if(callbacks.progress_cb)
	  {
	    int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
	    if(rr)
	      terminate_flag = 1;
	  }
#endif
      for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
	ahd_interpolate_green_h_and_v(top, left, rgb);
	ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
	ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
	ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
      }
    }
    free (buffer);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if(terminate_flag)
    throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
/*
   AHD driver (stand-alone dcraw build): same algorithm with the four
   steps inlined in a single tile loop.
 */
void CLASS ahd_interpolate()
{
  int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
  static const int dir[4] = { -1, 1, -TS, TS };
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
   short (*lab)[TS][TS][3], (*lix)[3];
  char (*homo)[TS][TS], *buffer;

#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif

  cielab (0,0);
  border_interpolate(5);
  buffer = (char *) malloc (26*TS*TS);
  merror (buffer, "ahd_interpolate()");
  rgb  = (ushort(*)[TS][TS][3]) buffer;
  lab  = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
  homo = (char  (*)[TS][TS])   (buffer + 24*TS*TS);

  for (top=2; top < height-5; top += TS-6)
    for (left=2; left < width-5; left += TS-6) {
/*  Interpolate green horizontally and vertically:		*/
      for (row=top; row < top+TS && row < height-2; row++) {
	col = left + (FC(row,left) & 1);
	for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
	  pix = image + row*width+col;
	  val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
		- pix[-2][c] - pix[2][c]) >> 2;
	  rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
	  val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
		- pix[-2*width][c] - pix[2*width][c]) >> 2;
	  rgb[1][row-top][col-left][1]
= ULIM(val,pix[-width][1],pix[width][1]); } } /* Interpolate red and blue, and convert to CIELab: */ for (d=0; d < 2; d++) for (row=top+1; row < top+TS-1 && row < height-3; row++) for (col=left+1; col < left+TS-1 && col < width-3; col++) { pix = image + row*width+col; rix = &rgb[d][row-top][col-left]; lix = &lab[d][row-top][col-left]; if ((c = 2 - FC(row,col)) == 1) { c = FC(row+1,col); val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c] - rix[-1][1] - rix[1][1] ) >> 1); rix[0][2-c] = CLIP(val); val = pix[0][1] + (( pix[-width][c] + pix[width][c] - rix[-TS][1] - rix[TS][1] ) >> 1); } else val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c] + pix[+width-1][c] + pix[+width+1][c] - rix[-TS-1][1] - rix[-TS+1][1] - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2); rix[0][c] = CLIP(val); c = FC(row,col); rix[0][c] = pix[0][c]; cielab (rix[0],lix[0]); } /* Build homogeneity maps from the CIELab images: */ memset (homo, 0, 2*TS*TS); for (row=top+2; row < top+TS-2 && row < height-4; row++) { tr = row-top; for (col=left+2; col < left+TS-2 && col < width-4; col++) { tc = col-left; for (d=0; d < 2; d++) { lix = &lab[d][tr][tc]; for (i=0; i < 4; i++) { ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]); abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1]) + SQR(lix[0][2]-lix[dir[i]][2]); } } leps = MIN(MAX(ldiff[0][0],ldiff[0][1]), MAX(ldiff[1][2],ldiff[1][3])); abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]), MAX(abdiff[1][2],abdiff[1][3])); for (d=0; d < 2; d++) for (i=0; i < 4; i++) if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps) homo[d][tr][tc]++; } } /* Combine the most homogenous pixels for the final result: */ for (row=top+3; row < top+TS-3 && row < height-5; row++) { tr = row-top; for (col=left+3; col < left+TS-3 && col < width-5; col++) { tc = col-left; for (d=0; d < 2; d++) for (hm[d]=0, i=tr-1; i <= tr+1; i++) for (j=tc-1; j <= tc+1; j++) hm[d] += homo[d][i][j]; if (hm[0] != hm[1]) FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c]; else FORC3 image[row*width+col][c] = 
(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1; } } } free (buffer); } #endif #undef TS void CLASS median_filter() { ushort (*pix)[4]; int pass, c, i, j, k, med[9]; static const uchar opt[] = /* Optimal 9-element median search */ { 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8, 0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 }; for (pass=1; pass <= med_passes; pass++) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Median filter pass %d...\n"), pass); #endif for (c=0; c < 3; c+=2) { for (pix = image; pix < image+width*height; pix++) pix[0][3] = pix[0][c]; for (pix = image+width; pix < image+width*(height-1); pix++) { if ((pix-image+1) % width < 2) continue; for (k=0, i = -width; i <= width; i += width) for (j = i-1; j <= i+1; j++) med[k++] = pix[j][3] - pix[j][1]; for (i=0; i < sizeof opt; i+=2) if (med[opt[i]] > med[opt[i+1]]) SWAP (med[opt[i]] , med[opt[i+1]]); pix[0][c] = CLIP(med[4] + pix[0][1]); } } } } void CLASS blend_highlights() { int clip=INT_MAX, row, col, c, i, j; static const float trans[2][4][4] = { { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } }, { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; static const float itrans[2][4][4] = { { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } }, { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } }; float cam[2][4], lab[2][4], sum[2], chratio; if ((unsigned) (colors-3) > 1) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Blending highlights...\n")); #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2); #endif FORCC if (clip > (i = 65535*pre_mul[c])) clip = i; for (row=0; row < height; row++) for (col=0; col < width; col++) { FORCC if (image[row*width+col][c] > clip) break; if (c == colors) continue; FORCC { cam[0][c] = image[row*width+col][c]; cam[1][c] = MIN(cam[0][c],clip); } for (i=0; i < 2; i++) { FORCC for (lab[i][c]=j=0; j < 
colors; j++) lab[i][c] += trans[colors-3][c][j] * cam[i][j]; for (sum[i]=0,c=1; c < colors; c++) sum[i] += SQR(lab[i][c]); } chratio = sqrt(sum[1]/sum[0]); for (c=1; c < colors; c++) lab[0][c] *= chratio; FORCC for (cam[0][c]=j=0; j < colors; j++) cam[0][c] += itrans[colors-3][c][j] * lab[0][j]; FORCC image[row*width+col][c] = cam[0][c] / colors; } #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2); #endif } #define SCALE (4 >> shrink) void CLASS recover_highlights() { float *map, sum, wgt, grow; int hsat[4], count, spread, change, val, i; unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x; ushort *pixel; static const signed char dir[8][2] = { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} }; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rebuilding highlights...\n")); #endif grow = pow (2.0, 4-highlight); FORCC hsat[c] = 32000 * pre_mul[c]; for (kc=0, c=1; c < colors; c++) if (pre_mul[kc] < pre_mul[c]) kc = c; high = height / SCALE; wide = width / SCALE; map = (float *) calloc (high, wide*sizeof *map); merror (map, "recover_highlights()"); FORCC if (c != kc) { #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1); #endif memset (map, 0, high*wide*sizeof *map); for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { sum = wgt = count = 0; for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++) for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) { pixel = image[row*width+col]; if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) { sum += pixel[c]; wgt += pixel[kc]; count++; } } if (count == SCALE*SCALE) map[mrow*wide+mcol] = sum / wgt; } for (spread = 32/grow; spread--; ) { for (mrow=0; mrow < high; mrow++) for (mcol=0; mcol < wide; mcol++) { if (map[mrow*wide+mcol]) continue; sum = count = 0; for (d=0; d < 8; d++) { y = mrow + dir[d][0]; x = mcol + dir[d][1]; if (y < high && x < wide && map[y*wide+x] > 0) { sum += (1 + (d & 1)) * map[y*wide+x]; count += 1 + (d & 1); } } if (count > 
	      3) map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
	}
	/* Newly filled cells were stored negated; flip them positive and
	   remember that this spreading pass changed something. */
	for (change=i=0; i < high*wide; i++)
	  if (map[i] < 0) {
	    map[i] = -map[i];
	    change = 1;
	  }
	if (!change) break;
      }
      /* Cells never reached by spreading get a neutral ratio of 1. */
      for (i=0; i < high*wide; i++)
	if (map[i] == 0) map[i] = 1;
      for (mrow=0; mrow < high; mrow++)
	for (mcol=0; mcol < wide; mcol++) {
	  for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
	    for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
	      pixel = image[row*width+col];
	      if (pixel[c] / hsat[c] > 1) {
		/* Rebuild the clipped channel from the strongest channel
		   (kc) scaled by the locally estimated ratio. */
		val = pixel[kc] * map[mrow*wide+mcol];
		if (pixel[c] < val) pixel[c] = CLIP(val);
	      }
	    }
	}
    }
  free (map);
}
#undef SCALE

/*
   Read one TIFF IFD entry header: tag, numeric type, value count, and
   the file position to restore afterwards.  If the value does not fit
   in the 4 inline bytes, seek to the offset where it is stored.
 */
void CLASS tiff_get (unsigned base,
	unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
  *tag  = get2();
  *type = get2();
  *len  = get4();
  *save = ftell(ifp) + 4;
  /* "11124811248484"[t]-'0' is the byte width of TIFF type t (index 0
     for out-of-range types); count*width > 4 means the value is stored
     indirectly at the offset that follows. */
  if (*len * ("11124811248484"[*type < 14 ? *type:0]-'0') > 4)
    fseek (ifp, get4()+base, SEEK_SET);
}

/* Scan a maker-note IFD for the thumbnail offset (toff) and length
   (tlen) tags and record them in thumb_offset / thumb_length. */
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
  unsigned entries, tag, type, len, save;

  entries = get2();
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == toff) thumb_offset = get4()+base;
    if (tag == tlen) thumb_length = get4();
    fseek (ifp, save, SEEK_SET);
  }
}
//@end COMMON

int CLASS parse_tiff_ifd (int base);

//@out COMMON
/* powf() with the exponent clamped: |b| > limup yields 0 instead of a
   huge/inf result. */
static float powf_lim(float a, float b, float limup)
{
  return (b>limup || b < -limup)?0.f:powf(a,b);
}
/* powf() restricted to exponents in [-64, 64]. */
static float powf64(float a, float b)
{
  return powf_lim(a,b,64.f);
}

#ifdef LIBRAW_LIBRARY_BUILD
/* Round to nearest, halves away from zero (portable roundf stand-in). */
static float my_roundf(float x) {
  float t;
  if (x >= 0.0)
  {
    t = ceilf(x);
    if (t - x > 0.5) t -= 1.0;
    return t;
  }
  else
  {
    t = ceilf(-x);
    if (t + x > 0.5) t -= 1.0;
    return -t;
  }
}

/* Decode Canon's signed 1/32-EV fixed-point value; fractional codes
   0x0c and 0x14 stand for 1/3 and 2/3 EV respectively. */
static float _CanonConvert2EV(short in)
{
  float frac1;
  short val = in, sign = 1, frac;
  if (val < 0)
  {
    val = -val;
    sign = -1;
  }
  frac = (val & 0x1f);
  val -= frac;
  if (frac == 0x0c) frac1 = 32.0f / 3.0f;
  else if (frac == 0x14) frac1 = 64.0f / 3.0f;
  else frac1 = (float)frac;
  return (float)sign * ((float)val + frac1) / 32.0f;
}

/* Convert a Canon EV aperture code to an f-number; 0xffe0 marks
   "not available" and maps to 0. */
static float _CanonConvertAperture(short in)
{
  if (in == (short)0xffe0) return
0.0f; else return powf64(2.0f, _CanonConvert2EV(in) / 2.0f); } void CLASS setCanonBodyFeatures (unsigned id) { imgdata.lens.makernotes.CamID = id; if ( (id == 0x80000001) || // 1D (id == 0x80000174) || // 1D2 (id == 0x80000232) || // 1D2N (id == 0x80000169) || // 1D3 (id == 0x80000281) // 1D4 ) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF; } else if ( (id == 0x80000167) || // 1Ds (id == 0x80000188) || // 1Ds2 (id == 0x80000215) || // 1Ds3 (id == 0x80000213) || // 5D (id == 0x80000218) || // 5D2 (id == 0x80000285) || // 5D3 (id == 0x80000302) || // 6D (id == 0x80000269) || // 1DX (id == 0x80000324) || // 1DC (id == 0x80000382) || // 5DS (id == 0x80000401) // 5DS R ) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF; } else if ( (id == 0x80000331) || // M (id == 0x80000355) // M2 ) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M; } else if ( (id == 0x01140000) || // D30 (id == 0x01668000) || // D60 (id > 0x80000000) ) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown; } else { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; } return; } void CLASS processCanonCameraInfo (unsigned id, uchar *CameraInfo) { ushort iCanonLensID = 0, iCanonMaxFocal = 0, iCanonMinFocal = 0, iCanonLens = 0, iCanonCurFocal = 0, iCanonFocalType = 0; CameraInfo[0] = 0; CameraInfo[1] = 0; switch (id) { case 0x80000001: // 1D case 0x80000167: // 1DS iCanonCurFocal = 10; iCanonLensID = 13; iCanonMinFocal = 14; iCanonMaxFocal = 16; if (!imgdata.lens.makernotes.CurFocal) imgdata.lens.makernotes.CurFocal = sget2(CameraInfo + iCanonCurFocal); if (!imgdata.lens.makernotes.MinFocal) 
imgdata.lens.makernotes.MinFocal = sget2(CameraInfo + iCanonMinFocal); if (!imgdata.lens.makernotes.MaxFocal) imgdata.lens.makernotes.MaxFocal = sget2(CameraInfo + iCanonMaxFocal); break; case 0x80000174: // 1DMkII case 0x80000188: // 1DsMkII iCanonCurFocal = 9; iCanonLensID = 12; iCanonMinFocal = 17; iCanonMaxFocal = 19; iCanonFocalType = 45; break; case 0x80000232: // 1DMkII N iCanonCurFocal = 9; iCanonLensID = 12; iCanonMinFocal = 17; iCanonMaxFocal = 19; break; case 0x80000169: // 1DMkIII case 0x80000215: // 1DsMkIII iCanonCurFocal = 29; iCanonLensID = 273; iCanonMinFocal = 275; iCanonMaxFocal = 277; break; case 0x80000281: // 1DMkIV iCanonCurFocal = 30; iCanonLensID = 335; iCanonMinFocal = 337; iCanonMaxFocal = 339; break; case 0x80000269: // 1D X iCanonCurFocal = 35; iCanonLensID = 423; iCanonMinFocal = 425; iCanonMaxFocal = 427; break; case 0x80000213: // 5D iCanonCurFocal = 40; if (!sget2Rev(CameraInfo + 12)) iCanonLensID = 151; else iCanonLensID = 12; iCanonMinFocal = 147; iCanonMaxFocal = 149; break; case 0x80000218: // 5DMkII iCanonCurFocal = 30; iCanonLensID = 230; iCanonMinFocal = 232; iCanonMaxFocal = 234; break; case 0x80000285: // 5DMkIII iCanonCurFocal = 35; iCanonLensID = 339; iCanonMinFocal = 341; iCanonMaxFocal = 343; break; case 0x80000302: // 6D iCanonCurFocal = 35; iCanonLensID = 353; iCanonMinFocal = 355; iCanonMaxFocal = 357; break; case 0x80000250: // 7D iCanonCurFocal = 30; iCanonLensID = 274; iCanonMinFocal = 276; iCanonMaxFocal = 278; break; case 0x80000190: // 40D iCanonCurFocal = 29; iCanonLensID = 214; iCanonMinFocal = 216; iCanonMaxFocal = 218; iCanonLens = 2347; break; case 0x80000261: // 50D iCanonCurFocal = 30; iCanonLensID = 234; iCanonMinFocal = 236; iCanonMaxFocal = 238; break; case 0x80000287: // 60D iCanonCurFocal = 30; iCanonLensID = 232; iCanonMinFocal = 234; iCanonMaxFocal = 236; break; case 0x80000325: // 70D iCanonCurFocal = 35; iCanonLensID = 358; iCanonMinFocal = 360; iCanonMaxFocal = 362; break; case 0x80000176: // 
450D iCanonCurFocal = 29; iCanonLensID = 222; iCanonLens = 2355; break; case 0x80000252: // 500D iCanonCurFocal = 30; iCanonLensID = 246; iCanonMinFocal = 248; iCanonMaxFocal = 250; break; case 0x80000270: // 550D iCanonCurFocal = 30; iCanonLensID = 255; iCanonMinFocal = 257; iCanonMaxFocal = 259; break; case 0x80000286: // 600D case 0x80000288: // 1100D iCanonCurFocal = 30; iCanonLensID = 234; iCanonMinFocal = 236; iCanonMaxFocal = 238; break; case 0x80000301: // 650D case 0x80000326: // 700D iCanonCurFocal = 35; iCanonLensID = 295; iCanonMinFocal = 297; iCanonMaxFocal = 299; break; case 0x80000254: // 1000D iCanonCurFocal = 29; iCanonLensID = 226; iCanonMinFocal = 228; iCanonMaxFocal = 230; iCanonLens = 2359; break; } if (iCanonFocalType) { imgdata.lens.makernotes.FocalType = CameraInfo[iCanonFocalType]; if (!imgdata.lens.makernotes.FocalType) // zero means 'fixed' here, replacing with standard '1' imgdata.lens.makernotes.FocalType = 1; } if (!imgdata.lens.makernotes.CurFocal) imgdata.lens.makernotes.CurFocal = sget2Rev(CameraInfo + iCanonCurFocal); if (!imgdata.lens.makernotes.LensID) imgdata.lens.makernotes.LensID = sget2Rev(CameraInfo + iCanonLensID); if (!imgdata.lens.makernotes.MinFocal) imgdata.lens.makernotes.MinFocal = sget2Rev(CameraInfo + iCanonMinFocal); if (!imgdata.lens.makernotes.MaxFocal) imgdata.lens.makernotes.MaxFocal = sget2Rev(CameraInfo + iCanonMaxFocal); if (!imgdata.lens.makernotes.Lens[0] && iCanonLens) { if (CameraInfo[iCanonLens] < 65) // non-Canon lens memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 64); else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-S", 4)) { memcpy(imgdata.lens.makernotes.Lens, "EF-S ", 5); memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-E", 4); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S; memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60); } else if (!strncmp((char *)CameraInfo + iCanonLens, "TS-E", 4)) { memcpy(imgdata.lens.makernotes.Lens, "TS-E ", 
5); memcpy(imgdata.lens.makernotes.LensFeatures_pre, "TS-E", 4); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60); } else if (!strncmp((char *)CameraInfo + iCanonLens, "MP-E", 4)) { memcpy(imgdata.lens.makernotes.Lens, "MP-E ", 5); memcpy(imgdata.lens.makernotes.LensFeatures_pre, "MP-E", 4); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60); } else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-M", 4)) { memcpy(imgdata.lens.makernotes.Lens, "EF-M ", 5); memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-M", 4); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M; memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60); } else { memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 2); memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF", 2); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; imgdata.lens.makernotes.Lens[2] = 32; memcpy(imgdata.lens.makernotes.Lens + 3, CameraInfo + iCanonLens + 2, 62); } } free(CameraInfo); return; } void CLASS processNikonLensData (uchar *LensData, unsigned len) { ushort i; if (len < 20) { switch (len) { case 9: i = 2; break; case 15: i = 7; break; case 16: i = 8; break; } imgdata.lens.nikon.NikonLensIDNumber = LensData[i]; imgdata.lens.nikon.NikonLensFStops = LensData[i + 1]; imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops /12.0f; imgdata.lens.makernotes.MinFocal = 5.0f * powf64(2.0f, (float)LensData[i + 2] / 24.0f); imgdata.lens.makernotes.MaxFocal = 5.0f * powf64(2.0f, (float)LensData[i + 3] / 24.0f); imgdata.lens.makernotes.MaxAp4MinFocal = powf64(2.0f, (float)LensData[i + 4] / 24.0f); imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(2.0f, (float)LensData[i + 5] / 24.0f); imgdata.lens.nikon.NikonMCUVersion = LensData[i + 6]; if (i != 2) { imgdata.lens.makernotes.CurFocal = 5.0f * 
powf64(2.0f, (float)LensData[i - 1] / 24.0f); imgdata.lens.nikon.NikonEffectiveMaxAp = powf64(2.0f, (float)LensData[i + 7] / 24.0f); } imgdata.lens.makernotes.LensID = (unsigned long long) LensData[i] << 56 | (unsigned long long) LensData[i + 1] << 48 | (unsigned long long) LensData[i + 2] << 40 | (unsigned long long) LensData[i + 3] << 32 | (unsigned long long) LensData[i + 4] << 24 | (unsigned long long) LensData[i + 5] << 16 | (unsigned long long) LensData[i + 6] << 8 | (unsigned long long) imgdata.lens.nikon.NikonLensType; } else if ((len == 459) || (len == 590)) { memcpy(imgdata.lens.makernotes.Lens, LensData + 390, 64); } else if (len == 509) { memcpy(imgdata.lens.makernotes.Lens, LensData + 391, 64); } else if (len == 879) { memcpy(imgdata.lens.makernotes.Lens, LensData + 680, 64); } free (LensData); return; } void CLASS setOlympusBodyFeatures (unsigned long id) { imgdata.lens.makernotes.CamID = id; if ((id == 0x4434303430) || (id == 0x4434303431) || ((id >= 0x5330303030) && (id <= 0x5330303939))) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FT; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT; } else { imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; } if ((id == 0x4434303430) || (id == 0x4434303431) || ((id >= 0x5330303033) && (id <= 0x5330303138)) || (id == 0x5330303233) || (id == 0x5330303239) || (id == 0x5330303330) || (id == 0x5330303333)) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT; } else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_mFT; } return; } void CLASS setPentaxBodyFeatures (unsigned id) { imgdata.lens.makernotes.CamID = id; switch (id) { case 0x12994: case 0x12aa2: case 0x12b1a: case 0x12b60: case 0x12b7e: case 0x12b80: case 0x12b9c: case 0x12b9d: case 0x12ba2: case 0x12c1e: case 0x12c20: case 0x12cd2: case 0x12cd4: case 0x12cfa: case 0x12d72: case 0x12d73: case 
0x12db8: case 0x12dfe: case 0x12e6c: case 0x12e76: case 0x12ef8: case 0x12f52: case 0x12f70: case 0x12f71: case 0x12fb6: case 0x12fc0: case 0x12fca: case 0x1301a: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; break; case 0x12e08: case 0x13010: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645; imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF; break; case 0x12ee4: case 0x12f66: case 0x12f7a: case 0x1302e: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q; break; default: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; } return; } void CLASS setPhaseOneFeatures (unsigned id) { ushort i; static const struct { ushort id; char t_model[32]; } p1_unique[] = { // Phase One section: {1, "Hasselblad V"}, {10, "PhaseOne/Mamiya"}, {12, "Contax 645"}, {16, "Hasselblad V"}, {17, "Hasselblad V"}, {18, "Contax 645"}, {19, "PhaseOne/Mamiya"}, {20, "Hasselblad V"}, {21, "Contax 645"}, {22, "PhaseOne/Mamiya"}, {23, "Hasselblad V"}, {24, "Hasselblad H"}, {25, "PhaseOne/Mamiya"}, {32, "Contax 645"}, {34, "Hasselblad V"}, {35, "Hasselblad V"}, {36, "Hasselblad H"}, {37, "Contax 645"}, {38, "PhaseOne/Mamiya"}, {39, "Hasselblad V"}, {40, "Hasselblad H"}, {41, "Contax 645"}, {42, "PhaseOne/Mamiya"}, {44, "Hasselblad V"}, {45, "Hasselblad H"}, {46, "Contax 645"}, {47, "PhaseOne/Mamiya"}, {48, "Hasselblad V"}, {49, "Hasselblad H"}, {50, "Contax 645"}, {51, "PhaseOne/Mamiya"}, {52, "Hasselblad V"}, {53, "Hasselblad H"}, {54, "Contax 645"}, {55, "PhaseOne/Mamiya"}, {67, "Hasselblad V"}, {68, "Hasselblad H"}, {69, "Contax 645"}, {70, "PhaseOne/Mamiya"}, {71, "Hasselblad V"}, {72, "Hasselblad 
H"}, {73, "Contax 645"}, {74, "PhaseOne/Mamiya"}, {76, "Hasselblad V"}, {77, "Hasselblad H"}, {78, "Contax 645"}, {79, "PhaseOne/Mamiya"}, {80, "Hasselblad V"}, {81, "Hasselblad H"}, {82, "Contax 645"}, {83, "PhaseOne/Mamiya"}, {84, "Hasselblad V"}, {85, "Hasselblad H"}, {86, "Contax 645"}, {87, "PhaseOne/Mamiya"}, {99, "Hasselblad V"}, {100, "Hasselblad H"}, {101, "Contax 645"}, {102, "PhaseOne/Mamiya"}, {103, "Hasselblad V"}, {104, "Hasselblad H"}, {105, "PhaseOne/Mamiya"}, {106, "Contax 645"}, {112, "Hasselblad V"}, {113, "Hasselblad H"}, {114, "Contax 645"}, {115, "PhaseOne/Mamiya"}, {131, "Hasselblad V"}, {132, "Hasselblad H"}, {133, "Contax 645"}, {134, "PhaseOne/Mamiya"}, {135, "Hasselblad V"}, {136, "Hasselblad H"}, {137, "Contax 645"}, {138, "PhaseOne/Mamiya"}, {140, "Hasselblad V"}, {141, "Hasselblad H"}, {142, "Contax 645"}, {143, "PhaseOne/Mamiya"}, {148, "Hasselblad V"}, {149, "Hasselblad H"}, {150, "Contax 645"}, {151, "PhaseOne/Mamiya"}, {160, "A-250"}, {161, "A-260"}, {162, "A-280"}, {167, "Hasselblad V"}, {168, "Hasselblad H"}, {169, "Contax 645"}, {170, "PhaseOne/Mamiya"}, {172, "Hasselblad V"}, {173, "Hasselblad H"}, {174, "Contax 645"}, {175, "PhaseOne/Mamiya"}, {176, "Hasselblad V"}, {177, "Hasselblad H"}, {178, "Contax 645"}, {179, "PhaseOne/Mamiya"}, {180, "Hasselblad V"}, {181, "Hasselblad H"}, {182, "Contax 645"}, {183, "PhaseOne/Mamiya"}, {208, "Hasselblad V"}, {211, "PhaseOne/Mamiya"}, {448, "Phase One 645AF"}, {457, "Phase One 645DF"}, {471, "Phase One 645DF+"}, {704, "Phase One iXA"}, {705, "Phase One iXA - R"}, {706, "Phase One iXU 150"}, {707, "Phase One iXU 150 - NIR"}, {708, "Phase One iXU 180"}, {721, "Phase One iXR"}, // Leaf section: {333,"Mamiya"}, {329,"Universal"}, {330,"Hasselblad H1/H2"}, {332,"Contax"}, {336,"AFi"}, {327,"Mamiya"}, {324,"Universal"}, {325,"Hasselblad H1/H2"}, {326,"Contax"}, {335,"AFi"}, {340,"Mamiya"}, {337,"Universal"}, {338,"Hasselblad H1/H2"}, {339,"Contax"}, {323,"Mamiya"}, {320,"Universal"}, 
{322,"Hasselblad H1/H2"}, {321,"Contax"}, {334,"AFi"}, {369,"Universal"}, {370,"Mamiya"}, {371,"Hasselblad H1/H2"}, {372,"Contax"}, {373,"Afi"}, }; imgdata.lens.makernotes.CamID = id; if (id && !imgdata.lens.makernotes.body[0]) { for (i=0; i < sizeof p1_unique / sizeof *p1_unique; i++) if (id == p1_unique[i].id) { strcpy(imgdata.lens.makernotes.body,p1_unique[i].t_model); } } return; } void CLASS setSonyBodyFeatures (unsigned id) { imgdata.lens.makernotes.CamID = id; if ( // FF cameras (id == 257) || // a900 (id == 269) || // a850 (id == 340) || // ILCE-7M2 (id == 318) || // ILCE-7S (id == 311) || // ILCE-7R (id == 306) || // ILCE-7 (id == 298) || // DSC-RX1 (id == 299) || // NEX-VG900 (id == 310) || // DSC-RX1R (id == 294) // SLT-99, Hasselblad HV ) { imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF; } else { if ((id != 002) && // DSC-R1 (id != 297) && // DSC-RX100 (id != 308) && // DSC-RX100M2 (id != 309) && // DSC-RX10 (id != 317)) // DSC-RX100M3 imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; } if ( // E-mount cameras // ILCE: (id == 302) || (id == 306) || (id == 311) || (id == 312) || (id == 313) || (id == 318) || (id == 339) || (id == 340) || (id == 346) || // NEX: (id == 278) || (id == 279) || (id == 284) || (id == 288) || (id == 289) || (id == 290) || (id == 293) || (id == 295) || (id == 296) || (id == 299) || (id == 300) || (id == 305) || (id == 307) ) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E; } else if ( // A-mount cameras // DSLR: (id == 256) || (id == 257) || (id == 258) || (id == 259) || (id == 260) || (id == 261) || (id == 262) || (id == 263) || (id == 264) || (id == 265) || (id == 266) || (id == 269) || (id == 270) || (id == 273) || (id == 274) || (id == 275) || (id == 282) || (id == 283) || // SLT: (id == 280) || (id == 281) || (id == 285) || (id == 286) || (id == 287) || (id == 291) || (id == 292) || (id == 294) || (id == 303) || // ILCA: (id == 319) ) { imgdata.lens.makernotes.CameraMount = 
LIBRAW_MOUNT_Minolta_A; } else if ( // DSC (id == 002) || // DSC-R1 (id == 297) || // DSC-RX100 (id == 298) || // DSC-RX1 (id == 308) || // DSC-RX100M2 (id == 309) || // DSC-RX10 (id == 310) || // DSC-RX1R (id == 317) // DSC-RX100M3 ) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; } return; } void CLASS parseSonyLensType2 (uchar a, uchar b) { ushort lid2; lid2 = (((ushort)a)<<8) | ((ushort)b); if (!lid2) return; if (lid2 < 0x100) { imgdata.lens.makernotes.AdapterID = lid2; switch (lid2) { case 1: case 2: case 3: case 6: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break; case 44: case 78: case 239: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; break; } } else imgdata.lens.makernotes.LensID = lid2; return; } void CLASS parseSonyLensFeatures (uchar a, uchar b) { ushort features; features = (((ushort)a)<<8) | ((ushort)b); if ((imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Canon_EF) || !features) return; imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FF; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; imgdata.lens.makernotes.LensFeatures_pre[0] = 0; imgdata.lens.makernotes.LensFeatures_suf[0] = 0; if ((features & 0x0200) && (features & 0x0100)) { strcpy(imgdata.lens.makernotes.LensFeatures_pre, "E"); imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; } else if (features & 0x0200) { strcpy(imgdata.lens.makernotes.LensFeatures_pre, "FE"); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; } else if (features & 0x0100) { strcpy(imgdata.lens.makernotes.LensFeatures_pre, "DT"); imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC; } if (features & 0x4000) strncat(imgdata.lens.makernotes.LensFeatures_pre, " PZ", sizeof(imgdata.lens.makernotes.LensFeatures_pre)); if (features & 0x0008) strncat(imgdata.lens.makernotes.LensFeatures_suf, " G", 
sizeof(imgdata.lens.makernotes.LensFeatures_suf)); else if (features & 0x0004) strncat(imgdata.lens.makernotes.LensFeatures_suf, " ZA", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if ((features & 0x0020) && (features & 0x0040)) strncat(imgdata.lens.makernotes.LensFeatures_suf, " Macro", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); else if (features & 0x0020) strncat(imgdata.lens.makernotes.LensFeatures_suf, " STF", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); else if (features & 0x0040) strncat(imgdata.lens.makernotes.LensFeatures_suf, " Reflex", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); else if (features & 0x0080) strncat(imgdata.lens.makernotes.LensFeatures_suf, " Fisheye", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if (features & 0x0001) strncat(imgdata.lens.makernotes.LensFeatures_suf, " SSM", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); else if (features & 0x0002) strncat(imgdata.lens.makernotes.LensFeatures_suf, " SAM", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if (features & 0x8000) strncat(imgdata.lens.makernotes.LensFeatures_suf, " OSS", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if (features & 0x2000) strncat(imgdata.lens.makernotes.LensFeatures_suf, " LE", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if (features & 0x0800) strncat(imgdata.lens.makernotes.LensFeatures_suf, " II", sizeof(imgdata.lens.makernotes.LensFeatures_suf)); if (imgdata.lens.makernotes.LensFeatures_suf[0] == ' ') memmove(imgdata.lens.makernotes.LensFeatures_suf, imgdata.lens.makernotes.LensFeatures_suf+1, strlen(imgdata.lens.makernotes.LensFeatures_suf)); return; } void CLASS process_Sony_0x940c (uchar * buf) { ushort lid2; if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) { switch (SonySubstitution[buf[0x0008]]) { case 1: case 5: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break; case 4: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break; } } lid2 = 
(((ushort)SonySubstitution[buf[0x000a]])<<8) | ((ushort)SonySubstitution[buf[0x0009]]); if ((lid2 > 0) && (lid2 < 32784)) parseSonyLensType2 (SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids SonySubstitution[buf[0x0009]]); return; } void CLASS process_Sony_0x9050 (uchar * buf, unsigned id) { ushort lid; if ((imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_Sony_E) && (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)) { if (buf[0]) imgdata.lens.makernotes.MaxAp = my_roundf(powf64(2.0f, ((float)SonySubstitution[buf[0]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f; if (buf[1]) imgdata.lens.makernotes.MinAp = my_roundf(powf64(2.0f, ((float)SonySubstitution[buf[1]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f; } if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens) { if (buf[0x3d] | buf[0x3c]) { lid = SonySubstitution[buf[0x3d]] << 8 | SonySubstitution[buf[0x3c]]; imgdata.lens.makernotes.CurAp = powf64(2.0f, ((float)lid/256.0f - 16.0f) / 2.0f); } if (buf[0x105] && (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)) imgdata.lens.makernotes.LensMount = SonySubstitution[buf[0x105]]; if (buf[0x106]) imgdata.lens.makernotes.LensFormat = SonySubstitution[buf[0x106]]; } if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E) { parseSonyLensType2 (SonySubstitution[buf[0x0108]], // LensType2 - Sony lens ids SonySubstitution[buf[0x0107]]); } if ((imgdata.lens.makernotes.LensID == -1) && (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) && (buf[0x010a] | buf[0x0109])) { imgdata.lens.makernotes.LensID = // LensType - Minolta/Sony lens ids SonySubstitution[buf[0x010a]] << 8 | SonySubstitution[buf[0x0109]]; if ((imgdata.lens.makernotes.LensID > 61184) && (imgdata.lens.makernotes.LensID < 65535)) { imgdata.lens.makernotes.LensID -= 61184; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; } } if ((id >= 286) && (id <= 293)) // "SLT-A65", "SLT-A77", "NEX-7", "NEX-VG20E", // "SLT-A37", "SLT-A57", "NEX-F3", "Lunar" 
parseSonyLensFeatures (SonySubstitution[buf[0x115]], SonySubstitution[buf[0x116]]);
  else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
    parseSonyLensFeatures (SonySubstitution[buf[0x116]], SonySubstitution[buf[0x117]]);
  return;
}

/*
  parse_makernote_0xc634: parse a vendor MakerNote IFD reached through DNG
  tag 0xc634, extracting body/lens metadata into imgdata.lens.makernotes.

  base       - file offset the IFD's value offsets are relative to; adjusted
               below per vendor header signature.
  uptag      - tag of the enclosing IFD, OR-ed into the high 16 bits of each
               tag so nested directories get distinct tag values.
  dng_writer - AdobeDNG or CameraDNG; several vendor branches differ on
               who wrote the DNG.

  Side effects: moves the stream position of `ifp`, temporarily changes the
  global byte-order `order` (restored from `sorder` at `quit:`), and may
  recurse (Olympus tag 0x2010) or call parse_makernote (Leica tag 0x3400).
*/
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
  unsigned offset = 0, entries, tag, type, len, save, c;
  unsigned i;
  uchar NikonKey, ci, cj, ck;                 /* Nikon tag 0xa7 decryption state */
  unsigned serial = 0;                        /* Nikon serial, key for decryption */
  unsigned NikonLensDataVersion = 0;
  unsigned lenNikonLensData = 0;              /* >0 while an encrypted LensData buffer is pending */
  uchar *CanonCameraInfo;                     /* NOTE(review): malloc'd at tag 0x000d, never freed here — apparent leak; confirm against upstream */
  unsigned lenCanonCameraInfo = 0;
  uchar *table_buf;
  uchar *table_buf_0x9050;                    /* Sony 0x9050 payload, held until CamID is known */
  ushort table_buf_0x9050_present = 0;
  uchar *table_buf_0x940c;                    /* Sony 0x940c payload, held until CamID/mount known */
  ushort table_buf_0x940c_present = 0;
  short morder, sorder = order;
  char buf[10];

  /* Identify the vendor header (if any) and set base/byte order accordingly.
     NOTE(review): buf is not guaranteed NUL-terminated after a short read —
     strcmp relies on the signature bytes themselves; verify inputs. */
  fread(buf, 1, 10, ifp);
  if (!strcmp(buf, "Nikon"))
  {
    /* Nikon: embedded TIFF header with its own byte order */
    base = ftell(ifp);
    order = get2();
    if (get2() != 42) goto quit;
    offset = get4();
    fseek(ifp, offset - 8, SEEK_CUR);
  }
  else if (!strcmp(buf, "OLYMPUS") || !strcmp(buf, "PENTAX ") ||
           (!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
  {
    base = ftell(ifp) - 10;
    fseek(ifp, -2, SEEK_CUR);
    order = get2();
    if (buf[0] == 'O') get2();
  }
  else if (!strncmp(buf, "SONY", 4) || !strcmp(buf, "Panasonic"))
  {
    goto nf;        /* little-endian, no base adjustment */
  }
  else if (!strncmp(buf, "FUJIFILM", 8))
  {
    base = ftell(ifp) - 10;
  nf:
    order = 0x4949; /* force little-endian */
    fseek(ifp, 2, SEEK_CUR);
  }
  else if (!strcmp(buf, "OLYMP") || !strcmp(buf, "LEICA") ||
           !strcmp(buf, "Ricoh") || !strcmp(buf, "EPSON"))
    fseek(ifp, -2, SEEK_CUR);
  else if (!strcmp(buf, "AOC") || !strcmp(buf, "QVC"))
    fseek(ifp, -4, SEEK_CUR);
  else
  {
    /* No recognized signature: rewind and treat as a bare IFD */
    fseek(ifp, -10, SEEK_CUR);
    if ((!strncmp(make, "SAMSUNG", 7) && (dng_writer == AdobeDNG)))
      base = ftell(ifp);
  }

  entries = get2();
  // if (dng_writer == AdobeDNG)
  //   printf("\n*** parse_makernote_0xc634: AdobeDNG");
  // else if (dng_writer == CameraDNG)
  //   printf("\n*** parse_makernote_0xc634: CameraDNG");
  // printf ("\n\tbuf =%s=\n\tmake =%s=\n\tmodel =%s=\n\tbase: 0x%x\n\tentries: %d\n",
  //         buf, make, model, base, entries);
  if (entries > 1000) return;   /* sanity bound on entry count */

  morder = order;
  while (entries--)
  {
    order = morder;
    tiff_get(base, &tag, &type, &len, &save);
    tag |= uptag << 16;   /* qualify nested-IFD tags with the parent tag */
    // printf ("\n\tbase: 0x%x tag: 0x%04x type: 0x%x len: 0x%x pos: 0x%llx",
    //         base, tag, type, len, ftell(ifp));

    /* ---------------- Canon ---------------- */
    if (!strcmp(make, "Canon"))
    {
      if (tag == 0x0001) // camera settings
      {
        fseek(ifp, 44, SEEK_CUR);
        imgdata.lens.makernotes.LensID = get2();
        imgdata.lens.makernotes.MaxFocal = get2();
        imgdata.lens.makernotes.MinFocal = get2();
        imgdata.lens.makernotes.CanonFocalUnits = get2();
        if (imgdata.lens.makernotes.CanonFocalUnits != 1)
        {
          imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
          imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
        }
        imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
        imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
      }
      else if (tag == 0x0002) // focal length
      {
        imgdata.lens.makernotes.FocalType = get2();
        imgdata.lens.makernotes.CurFocal = get2();
        if ((imgdata.lens.makernotes.CanonFocalUnits != 1) &&
            imgdata.lens.makernotes.CanonFocalUnits)
        {
          imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
        }
      }
      else if (tag == 0x0004) // shot info
      {
        fseek(ifp, 42, SEEK_CUR);
        imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
      }
      else if (tag == 0x000d) // camera info: buffered until ModelID (0x10) arrives
      {
        /* NOTE(review): malloc result unchecked, and buffer is never freed
           in this function — verify against upstream LibRaw. */
        CanonCameraInfo = (uchar*)malloc(len);
        fread(CanonCameraInfo, len, 1, ifp);
        lenCanonCameraInfo = len;
      }
      else if (tag == 0x10) // Canon ModelID
      {
        unique_id = get4();
        setCanonBodyFeatures(unique_id);
        if (lenCanonCameraInfo)
          processCanonCameraInfo(unique_id, CanonCameraInfo);
      }
      else if (tag == 0x0095 && // lens model tag
               !imgdata.lens.makernotes.Lens[0])
      {
        fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
        imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
        if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens (doesn't start with 'A'..)
          fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
        else
        {
          char efs[2];
          imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
          imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
          fread(efs, 2, 1, ifp);
          /* '-' followed by 'S'/'E'/'M' */
          if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
          { // "EF-S, TS-E, MP-E, EF-M" lenses
            imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
            imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
            imgdata.lens.makernotes.Lens[4] = 32;   /* space */
            if (efs[1] == 83)
            {
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
              imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
            }
            else if (efs[1] == 77)
            {
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
            }
          }
          else
          { // "EF" lenses
            imgdata.lens.makernotes.Lens[2] = 32;
            imgdata.lens.makernotes.Lens[3] = efs[0];
            imgdata.lens.makernotes.Lens[4] = efs[1];
          }
          fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
        }
      }
    }

    /* ---------------- Fujifilm ---------------- */
    else if (!strncmp(make, "FUJI", 4))
      switch (tag)
      {
      case 0x1404:
        imgdata.lens.makernotes.MinFocal = getreal(type);
        break;
      case 0x1405:
        imgdata.lens.makernotes.MaxFocal = getreal(type);
        break;
      case 0x1406:
        imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
        break;
      case 0x1407:
        imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
        break;
      }

    /* ---------------- Leica ---------------- */
    else if (!strncasecmp(make, "LEICA", 5))
    {
      if ((tag == 0x0303) && (type != 4))
      {
        fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
      }
      if ((tag == 0x3405) || (tag == 0x0310) || (tag == 0x34003405))
      {
        imgdata.lens.makernotes.LensID = get4();
        /* repack: high bits shifted into a x256 namespace, low 2 bits kept */
        imgdata.lens.makernotes.LensID =
          ((imgdata.lens.makernotes.LensID >> 2) << 8) |
          (imgdata.lens.makernotes.LensID & 0x3);
        if (imgdata.lens.makernotes.LensID != -1)
        {
          if ((model[0] == 'M') || !strncasecmp (model, "LEICA M", 7))
          {
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
            if (imgdata.lens.makernotes.LensID)
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
          }
          else if ((model[0] == 'S') || !strncasecmp (model, "LEICA S", 7))
          {
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
            if (imgdata.lens.makernotes.Lens[0])
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
          }
        }
      }
      else if ( ((tag == 0x0313) || (tag == 0x34003406)) &&
                (fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
                ((type == 10) || (type == 5)) )
      {
        imgdata.lens.makernotes.CurAp = getreal(type);
        if (imgdata.lens.makernotes.CurAp > 126.3)
          imgdata.lens.makernotes.CurAp = 0.0f;   /* reject implausible aperture */
      }
      else if (tag == 0x3400)
      {
        parse_makernote (base, 0x3400);   /* nested Leica directory */
      }
    }

    /* ---------------- Nikon ---------------- */
    else if (!strncmp(make, "NIKON", 5))
    {
      if (tag == 0x1d) // serial number; also the key for tag 0xa7 decryption
        /* NOTE(review): c is unsigned, so `c != EOF` compares against
           (unsigned)EOF — works on two's complement, but subtle. */
        while ((c = fgetc(ifp)) && c != EOF)
          serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
      else if (tag == 0x0082) // lens attachment
      {
        fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
      }
      else if (tag == 0x0083) // lens type bitmask
      {
        imgdata.lens.nikon.NikonLensType = fgetc(ifp);
        if (!(imgdata.lens.nikon.NikonLensType & 0x01))
        {
          imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
          imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
        }
        if (imgdata.lens.nikon.NikonLensType & 0x02)
        {
          if (imgdata.lens.nikon.NikonLensType & 0x04)
            imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
          else
            imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
          imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
        }
        if (imgdata.lens.nikon.NikonLensType & 0x08)
        {
          imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
          imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
        }
        if (imgdata.lens.nikon.NikonLensType & 0x10)
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_CX;
        }
        if (imgdata.lens.nikon.NikonLensType & 0x20)
        {
          strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
        }
        /* clear the FT-1 bit after recording it */
        imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
      }
      else if (tag == 0x0084) // lens min/max focal & apertures
      {
        imgdata.lens.makernotes.MinFocal = getreal(type);
        imgdata.lens.makernotes.MaxFocal = getreal(type);
        imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
        imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
      }
      else if (tag == 0x008b) // lens f-stops
      {
        uchar a, b, c;
        a = fgetc(ifp);
        b = fgetc(ifp);
        c = fgetc(ifp);
        if (c)
        {
          imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
          imgdata.lens.makernotes.LensFStops =
            (float)imgdata.lens.nikon.NikonLensFStops /12.0f;
        }
      }
      else if (tag == 0x0098) // contains lens data
      {
        /* 4-digit ASCII version number */
        for (i = 0; i < 4; i++)
        {
          NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
        }
        switch (NikonLensDataVersion)
        {
        case 100:
          lenNikonLensData = 9;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
          break;
        case 101:
        case 201: // encrypted, starting from v.201
        case 202:
        case 203:
          lenNikonLensData = 15;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
          break;
        case 204:
          lenNikonLensData = 16;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
          break;
        case 400:
          lenNikonLensData = 459;
          break;
        case 401:
          lenNikonLensData = 590;
          break;
        case 402:
          lenNikonLensData = 509;
          break;
        case 403:
          lenNikonLensData = 879;
          break;
        }
        /* NOTE(review): unknown version -> malloc(0)/fread(0); buffer kept
           for later decryption at tag 0xa7 when version >= 201. */
        table_buf = (uchar*)malloc(lenNikonLensData);
        fread(table_buf, lenNikonLensData, 1, ifp);
        if ((NikonLensDataVersion < 201) && lenNikonLensData)
        {
          processNikonLensData(table_buf, lenNikonLensData);
          lenNikonLensData = 0;
        }
      }
      else if (tag == 0xa7) // shutter count; key completes LensData decryption
      {
        NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
        if ((NikonLensDataVersion > 200) && lenNikonLensData)
        {
          /* classic Nikon XOR keystream: seeded from serial + shutter-count key */
          ci = xlat[0][serial & 0xff];
          cj = xlat[1][NikonKey];
          ck = 0x60;
          for (i = 0; i < lenNikonLensData; i++)
            table_buf[i] ^= (cj += ci * ck++);
          processNikonLensData(table_buf, lenNikonLensData);
          lenNikonLensData = 0;
        }
      }
      else if (tag == 37 && (!iso_speed || iso_speed == 65535)) /* tag 37 == 0x25, hi ISO */
      {
        unsigned char cc;
        fread(&cc, 1, 1, ifp);
        iso_speed = (int)(100.0 * powf64(2.0, (double)(cc) / 12.0 - 5.0));
        break;   /* NOTE(review): breaks out of the entries loop entirely — confirm intended */
      }
    }

    /* ---------------- Olympus ---------------- */
    else if (!strncmp(make, "OLYMPUS", 7))
    {
      if (tag == 0x2010)   /* Equipment sub-IFD: recurse */
      {
        fseek(ifp, save - 4, SEEK_SET);
        fseek(ifp, base + get4(), SEEK_SET);
        parse_makernote_0xc634(base, 0x2010, dng_writer);
      }
      switch (tag)
      {
      case 0x0207:
      case 0x20100100:
      {
        uchar sOlyID[7];
        long unsigned OlyID;
        fread (sOlyID, len, 1, ifp);
        /* fold the ASCII body code into an integer ID */
        OlyID = sOlyID[0];
        i = 1;
        while (sOlyID[i])
        {
          OlyID = OlyID << 8 | sOlyID[i];
          i++;
        }
        setOlympusBodyFeatures(OlyID);
      }
      break;
      case 0x1002:
        imgdata.lens.makernotes.CurAp = powf64(2.0f, getreal(type)/2);
        break;
      case 0x20100201:
        /* comma operator: second fgetc discards a byte, third supplies the value */
        imgdata.lens.makernotes.LensID =
          (unsigned long long)fgetc(ifp)<<16 |
          (unsigned long long)(fgetc(ifp), fgetc(ifp))<<8 |
          (unsigned long long)fgetc(ifp);
        imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
        imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
        if (((imgdata.lens.makernotes.LensID < 0x20000) ||
             (imgdata.lens.makernotes.LensID > 0x4ffff)) &&
            (imgdata.lens.makernotes.LensID & 0x10))
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
        }
        break;
      case 0x20100203:
        fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
        break;
      case 0x20100205:
        imgdata.lens.makernotes.MaxAp4MinFocal = powf64(sqrt(2.0f), get2() / 256.0f);
        break;
      case 0x20100206:
        imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(sqrt(2.0f), get2() / 256.0f);
        break;
      case 0x20100207:
        imgdata.lens.makernotes.MinFocal = (float)get2();
        break;
      case 0x20100208:
        imgdata.lens.makernotes.MaxFocal = (float)get2();
        if (imgdata.lens.makernotes.MaxFocal > 1000.0f)   /* reject implausible value */
          imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
        break;
      case 0x2010020a:
        imgdata.lens.makernotes.MaxAp4CurFocal = powf64(sqrt(2.0f), get2() / 256.0f);
        break;
      case 0x20100301:
        imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
        fgetc(ifp);   /* skip one byte */
        imgdata.lens.makernotes.TeleconverterID =
          imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
        break;
      case 0x20100303:
        fread(imgdata.lens.makernotes.Teleconverter, len, 1, ifp);
        break;
      case 0x20100403:
        fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
        break;
      }
    }

    /* ---------------- Pentax (and Samsung-made Pentax DNGs) ---------------- */
    else if (!strncmp(make, "PENTAX", 6) ||
             !strncmp(model, "PENTAX", 6) ||
             (!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
    {
      if (tag == 0x0005)
      {
        unique_id = get4();
        setPentaxBodyFeatures(unique_id);
        if ( (dng_writer == CameraDNG) &&
             ( (unique_id == 0x12f66) || // Q10
               (unique_id == 0x12f7a) || // Q7
               (unique_id == 0x12ee4)    // Q
             ) )
          base += 10;
      }
      else if (tag == 0x0013)
      {
        imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
      }
      else if (tag == 0x001d)
      {
        imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
      }
      else if (tag == 0x003f)
      {
        imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
      }
      else if (tag == 0x0207) // LensInfo blob; layout depends on body generation
      {
        ushort iLensData = 0;
        table_buf = (uchar*)malloc(len);
        fread(table_buf, len, 1, ifp);
        /* NOTE(review): && binds tighter than ||, so the table_buf[20]
           check only gates the 0x12ba2 case — precedence looks suspicious
           but matches this code's upstream; confirm before changing. */
        if ((imgdata.lens.makernotes.CamID < 0x12b9c) ||
            ((imgdata.lens.makernotes.CamID == 0x12b9c) ||  // K100D
             (imgdata.lens.makernotes.CamID == 0x12b9d) ||  // K110D
             (imgdata.lens.makernotes.CamID == 0x12ba2) &&  // K100D Super
             (!table_buf[20] || (table_buf[20] == 0xff))))
        {
          iLensData = 3;
          if (imgdata.lens.makernotes.LensID == -1)
            imgdata.lens.makernotes.LensID =
              (((unsigned)table_buf[0]) << 8) + table_buf[1];
        }
        else
          switch (len)
          {
          case 90: // LensInfo3
            iLensData = 13;
            if (imgdata.lens.makernotes.LensID == -1)
              imgdata.lens.makernotes.LensID =
                ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
            break;
          case 91: // LensInfo4
            iLensData = 12;
            if (imgdata.lens.makernotes.LensID == -1)
              imgdata.lens.makernotes.LensID =
                ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
            break;
          case 80: // LensInfo5
          case 128:
            iLensData = 15;
            if (imgdata.lens.makernotes.LensID == -1)
              imgdata.lens.makernotes.LensID =
                ((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5];
            break;
          default:
            if (imgdata.lens.makernotes.CamID >= 0x12b9c) // LensInfo2
            {
              iLensData = 4;
              if (imgdata.lens.makernotes.LensID == -1)
                imgdata.lens.makernotes.LensID =
                  ((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3];
            }
          }
        if (iLensData)
        {
          /* focal length: packed mantissa/exponent */
          if (table_buf[iLensData+9] &&
              (fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
            imgdata.lens.makernotes.CurFocal =
              10*(table_buf[iLensData+9]>>2) *
              powf64(4, (table_buf[iLensData+9] & 0x03)-2);
          if (table_buf[iLensData+10] & 0xf0)
            imgdata.lens.makernotes.MaxAp4CurFocal =
              powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f);
          if (table_buf[iLensData+10] & 0x0f)
            imgdata.lens.makernotes.MinAp4CurFocal =
              powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f);
          if ( (imgdata.lens.makernotes.CamID != 0x12e6c) &&  // K-r
               (imgdata.lens.makernotes.CamID != 0x12e76) &&  // K-5
               (imgdata.lens.makernotes.CamID != 0x12f70)     // K-5 II
               // (imgdata.lens.makernotes.CamID != 0x12f71)  // K-5 II s
             )
          {
            switch (table_buf[iLensData] & 0x06)
            {
            case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break;
            case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break;
            case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break;
            case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break;
            }
            if (table_buf[iLensData] & 0x70)
              imgdata.lens.makernotes.LensFStops =
                ((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
            if ((table_buf[iLensData+14] > 1) &&
                (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
              imgdata.lens.makernotes.MaxAp4CurFocal =
                powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f);
          }
          else if ((imgdata.lens.makernotes.CamID != 0x12e76) && // K-5
                   (table_buf[iLensData+15] > 1) &&
                   (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
          {
            imgdata.lens.makernotes.MaxAp4CurFocal =
              powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f);
          }
        }
        free(table_buf);
      }
      else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
      {
        char LensInfo [20];
        fseek (ifp, 2, SEEK_CUR);
        fread(imgdata.lens.makernotes.Lens, 30, 1, ifp);
        strcat(imgdata.lens.makernotes.Lens, " ");
        fread(LensInfo, 20, 1, ifp);
        /* NOTE(review): LensInfo may be unterminated before strcat — verify buffer sizes */
        strcat(imgdata.lens.makernotes.Lens, LensInfo);
      }
    }

    /* ---------------- Samsung (Adobe-written DNG) ---------------- */
    else if (!strncmp(make, "SAMSUNG", 7) && (dng_writer == AdobeDNG))
    {
      if (tag == 0x0002)
      {
        if(get4() == 0x2000)
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
        }
        else if (!strncmp(model, "NX mini", 7))
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
        }
        else
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
        }
      }
      else if (tag == 0x0003)
      {
        imgdata.lens.makernotes.CamID = unique_id = get4();
      }
      else if (tag == 0xa003)
      {
        imgdata.lens.makernotes.LensID = get2();
        if (imgdata.lens.makernotes.LensID)
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
      }
      else if (tag == 0xa019)
      {
        imgdata.lens.makernotes.CurAp = getreal(type);
      }
      else if (tag == 0xa01a)
      {
        imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
        if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
          imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
      }
    }

    /* ---------------- Sony / Konica Minolta (and rebadged Hasselblads) ---------------- */
    else if (!strncasecmp(make, "SONY", 4) ||
             !strncasecmp(make, "Konica", 6) ||
             !strncasecmp(make, "Minolta", 7) ||
             (!strncasecmp(make, "Hasselblad", 10) &&
              (!strncasecmp(model, "Stellar", 7) ||
               !strncasecmp(model, "Lunar", 5) ||
               !strncasecmp(model, "HV",2))))
    {
      ushort lid;
      if (tag == 0xb001) // Sony ModelID; flush any deferred 0x9050/0x940c payloads
      {
        unique_id = get2();
        setSonyBodyFeatures(unique_id);
        if (table_buf_0x9050_present)
        {
          process_Sony_0x9050(table_buf_0x9050, unique_id);
          free (table_buf_0x9050);
          table_buf_0x9050_present = 0;
        }
        if (table_buf_0x940c_present)
        {
          if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
          {
            process_Sony_0x940c(table_buf_0x940c);
          }
          free (table_buf_0x940c);
          table_buf_0x940c_present = 0;
        }
      }
      else if ((tag == 0x0010) && // CameraInfo
               strncasecmp(model, "DSLR-A100", 9) &&
               strncasecmp(model, "NEX-5C", 6) &&
               !strncasecmp(make, "SONY", 4) &&
               ((len == 368) ||  // a700
                (len == 5478) || // a850, a900
                (len == 5506) || // a200, a300, a350
                (len == 6118) || // a230, a290, a330, a380, a390
                                 // a450, a500, a550, a560, a580
                                 // a33, a35, a55
                                 // NEX3, NEX5, NEX5C, NEXC3, VG10E
                (len == 15360)) )
      {
        table_buf = (uchar*)malloc(len);
        fread(table_buf, len, 1, ifp);
        /* skip all-FF / all-00 (unset) blocks */
        if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
            memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
        {
          switch (len)
          {
          case 368:
          case 5478:
            // a700, a850, a900: CameraInfo
            if (saneSonyCameraInfo(table_buf[0], table_buf[3], table_buf[2],
                                   table_buf[5], table_buf[4], table_buf[7]))
            {
              if (table_buf[0] | table_buf[3])
                imgdata.lens.makernotes.MinFocal =
                  bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
              if (table_buf[2] | table_buf[5])
                imgdata.lens.makernotes.MaxFocal =
                  bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
              if (table_buf[4])
                imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
              /* NOTE(review): guard tests table_buf[4] but the value comes
                 from table_buf[7] — looks like a copy/paste slip; matches
                 this code's upstream, verify before fixing. */
              if (table_buf[4])
                imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
              parseSonyLensFeatures(table_buf[1], table_buf[6]);
            }
            break;
          default:
            // CameraInfo2 & 3
            if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3],
                                   table_buf[4], table_buf[5], table_buf[6]))
            {
              if (table_buf[1] | table_buf[2])
                imgdata.lens.makernotes.MinFocal =
                  bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
              if (table_buf[3] | table_buf[4])
                imgdata.lens.makernotes.MaxFocal =
                  bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
              if (table_buf[5])
                imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
              if (table_buf[6])
                imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
              parseSonyLensFeatures(table_buf[0], table_buf[7]);
            }
          }
        }
        free(table_buf);
      }
      else if (tag == 0x0105) // Teleconverter
      {
        imgdata.lens.makernotes.TeleconverterID = get2();
      }
      else if (tag == 0x0114) // CameraSettings
      {
        table_buf = (uchar*)malloc(len);
        fread(table_buf, len, 1, ifp);
        switch (len)
        {
        case 280:
        case 364:
        case 332:
          // CameraSettings and CameraSettings2 are big endian
          if (table_buf[2] | table_buf[3])
          {
            lid = (((ushort)table_buf[2])<<8) | ((ushort)table_buf[3]);
            imgdata.lens.makernotes.CurAp = powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
          }
          break;
        case 1536:
        case 2048:
          // CameraSettings3 are little endian
          parseSonyLensType2(table_buf[1016], table_buf[1015]);
          if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
          {
            switch (table_buf[153])
            {
            case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
            case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
            }
          }
          break;
        }
        free(table_buf);
      }
      else if (tag == 0x9050) // little endian
      {
        /* buffered: processed now if CamID already known, else at tag 0xb001 */
        table_buf_0x9050 = (uchar*)malloc(len);
        table_buf_0x9050_present = 1;
        fread(table_buf_0x9050, len, 1, ifp);
        if (imgdata.lens.makernotes.CamID)
        {
          process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
          free (table_buf_0x9050);
          table_buf_0x9050_present = 0;
        }
      }
      else if (tag == 0x940c)
      {
        /* buffered like 0x9050; only processed for E-mount bodies */
        table_buf_0x940c = (uchar*)malloc(len);
        table_buf_0x940c_present = 1;
        fread(table_buf_0x940c, len, 1, ifp);
        if ((imgdata.lens.makernotes.CamID) &&
            (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
        {
          process_Sony_0x940c(table_buf_0x940c);
          free(table_buf_0x940c);
          table_buf_0x940c_present = 0;
        }
      }
      else if (((tag == 0xb027) || (tag == 0x010c)) &&
               (imgdata.lens.makernotes.LensID == -1))
      {
        imgdata.lens.makernotes.LensID = get4();
        /* IDs 61185..65534 are Canon lenses on adapters */
        if ((imgdata.lens.makernotes.LensID > 61184) &&
            (imgdata.lens.makernotes.LensID < 65535))
        {
          imgdata.lens.makernotes.LensID -= 61184;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
        }
        if (tag == 0x010c)
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
      }
      else if (tag == 0xb02a) // Sony LensSpec
      {
        table_buf = (uchar*)malloc(len);
        fread(table_buf, len, 1, ifp);
        if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3],
                               table_buf[4], table_buf[5], table_buf[6]))
        {
          if (table_buf[1] | table_buf[2])
            imgdata.lens.makernotes.MinFocal =
              bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
          if (table_buf[3] | table_buf[4])
            imgdata.lens.makernotes.MaxFocal =
              bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
          if (table_buf[5])
            imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
          if (table_buf[6])
            imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
          parseSonyLensFeatures(table_buf[0], table_buf[7]);
        }
        free(table_buf);
      }
    }
  next:   /* NOTE(review): label has no visible goto in this function */
    fseek (ifp, save, SEEK_SET);   /* return to the next IFD entry */
  }
quit:
  order = sorder;   /* restore caller's byte order */
}
#else
/* Non-library (compact) build: MakerNote-in-DNG parsing is compiled out. */
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
  /*placeholder */
}
#endif

void CLASS parse_makernote (int base, int uptag)
{
  unsigned offset=0, entries, tag, type,
len, save, c; unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0}; uchar buf97[324], ci, cj, ck; short morder, sorder=order; char buf[10]; unsigned SamsungKey[11]; static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65 {{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08}, {-1.233904514232401E-07, 0.999999995196570, 3.126724276714121e-08}, { 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }}; float adobe_cam [3][3]; uchar NikonKey; #ifdef LIBRAW_LIBRARY_BUILD unsigned NikonLensDataVersion = 0; unsigned lenNikonLensData = 0; uchar *CanonCameraInfo; unsigned lenCanonCameraInfo = 0; uchar *table_buf; uchar *table_buf_0x9050; ushort table_buf_0x9050_present = 0; uchar *table_buf_0x940c; ushort table_buf_0x940c_present = 0; #endif /* The MakerNote might have its own TIFF header (possibly with its own byte-order!), or it might just be a table. */ if (!strcmp(make,"Nokia")) return; fread (buf, 1, 10, ifp); if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */ !strncmp (buf,"VER" ,3) || !strncmp (buf,"IIII",4) || !strncmp (buf,"MMMM",4)) return; if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */ !strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */ order = 0x4d4d; while ((i=ftell(ifp)) < data_offset && i < 16384) { wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3]; wb[3] = get2(); if (wb[1] == 256 && wb[3] == 256 && wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640) FORC4 cam_mul[c] = wb[c]; } goto quit; } if (!strcmp (buf,"Nikon")) { base = ftell(ifp); order = get2(); if (get2() != 42) goto quit; offset = get4(); fseek (ifp, offset-8, SEEK_CUR); } else if (!strcmp (buf,"OLYMPUS") || !strcmp (buf,"PENTAX ")) { base = ftell(ifp)-10; fseek (ifp, -2, SEEK_CUR); order = get2(); if (buf[0] == 'O') get2(); } else if (!strncmp (buf,"SONY",4) || !strcmp (buf,"Panasonic")) { goto nf; } else if (!strncmp (buf,"FUJIFILM",8)) { base = ftell(ifp)-10; nf: order = 0x4949; fseek (ifp, 2, SEEK_CUR); } else if (!strcmp 
(buf,"OLYMP") || !strcmp (buf,"LEICA") || !strcmp (buf,"Ricoh") || !strcmp (buf,"EPSON")) fseek (ifp, -2, SEEK_CUR); else if (!strcmp (buf,"AOC") || !strcmp (buf,"QVC")) fseek (ifp, -4, SEEK_CUR); else { fseek (ifp, -10, SEEK_CUR); if (!strncmp(make,"SAMSUNG",7)) base = ftell(ifp); } // adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400 if (!strncasecmp(make, "LEICA", 5)) { if (!strncmp(model, "M8", 2) || !strncasecmp(model, "Leica M8", 8) || !strncasecmp(model, "LEICA X", 7)) { base = ftell(ifp)-8; } else if (!strncasecmp(model, "LEICA M (Typ 240)", 17)) { base = 0; } else if (!strncmp(model, "M9", 2) || !strncasecmp(model, "Leica M9", 8) || !strncasecmp(model, "M Monochrom", 11) || !strncasecmp(model, "Leica M Monochrom", 11)) { if (!uptag) { base = ftell(ifp) - 10; fseek (ifp, 8, SEEK_CUR); } else if (uptag == 0x3400) { fseek (ifp, 10, SEEK_CUR); base += 10; } } else if (!strncasecmp(model, "LEICA T", 7)) { base = ftell(ifp)-8; #ifdef LIBRAW_LIBRARY_BUILD imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T; #endif } } entries = get2(); // printf("\n*** parse_makernote\n\tmake =%s=\n\tmodel =%s= \n\tentries: %d\n\tpos: 0x%llx\n", // make, model, entries, ftell(ifp)); if (entries > 1000) return; morder = order; while (entries--) { order = morder; tiff_get (base, &tag, &type, &len, &save); tag |= uptag << 16; // printf ("\n\tbase: 0x%x tag: 0x%04x type: 0x%x len: 0x%x pos: 0x%llx", // base, tag, type, len, ftell(ifp)); #ifdef LIBRAW_LIBRARY_BUILD if (!strcmp(make, "Canon")) { if (tag == 0x0001) // camera settings { fseek(ifp, 44, SEEK_CUR); imgdata.lens.makernotes.LensID = get2(); imgdata.lens.makernotes.MaxFocal = get2(); imgdata.lens.makernotes.MinFocal = get2(); imgdata.lens.makernotes.CanonFocalUnits = get2(); if (imgdata.lens.makernotes.CanonFocalUnits != 1) { imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; } 
imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2()); imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2()); } else if (tag == 0x0002) // focal length { imgdata.lens.makernotes.FocalType = get2(); imgdata.lens.makernotes.CurFocal = get2(); if ((imgdata.lens.makernotes.CanonFocalUnits != 1) && imgdata.lens.makernotes.CanonFocalUnits) { imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; } } else if (tag == 0x0004) // shot info { fseek(ifp, 42, SEEK_CUR); imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2()); } else if (tag == 0x000d) // camera info { CanonCameraInfo = (uchar*)malloc(len); fread(CanonCameraInfo, len, 1, ifp); lenCanonCameraInfo = len; } else if (tag == 0x0095 && // lens model tag !imgdata.lens.makernotes.Lens[0]) { fread(imgdata.lens.makernotes.Lens, 2, 1, ifp); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp); else { char efs[2]; imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0]; imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1]; fread(efs, 2, 1, ifp); if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77)) { // "EF-S, TS-E, MP-E, EF-M" lenses imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0]; imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1]; imgdata.lens.makernotes.Lens[4] = 32; if (efs[1] == 83) { imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S; imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC; } else if (efs[1] == 77) { imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M; } } else { // "EF" lenses imgdata.lens.makernotes.Lens[2] = 32; imgdata.lens.makernotes.Lens[3] = efs[0]; imgdata.lens.makernotes.Lens[4] = efs[1]; } fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp); } } } else if (!strncmp(make, 
"FUJI", 4)) switch (tag) { case 0x1404: imgdata.lens.makernotes.MinFocal = getreal(type); break; case 0x1405: imgdata.lens.makernotes.MaxFocal = getreal(type); break; case 0x1406: imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); break; case 0x1407: imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); break; } else if (!strncasecmp(make, "LEICA", 5)) { if ((tag == 0x0303) && (type != 4)) { fread(imgdata.lens.makernotes.Lens, len, 1, ifp); } if ((tag == 0x3405) || (tag == 0x0310) || (tag == 0x34003405)) { imgdata.lens.makernotes.LensID = get4(); imgdata.lens.makernotes.LensID = ((imgdata.lens.makernotes.LensID>>2)<<8) | (imgdata.lens.makernotes.LensID & 0x3); if (imgdata.lens.makernotes.LensID != -1) { if ((model[0] == 'M') || !strncasecmp (model, "LEICA M", 7)) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M; if (imgdata.lens.makernotes.LensID) imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M; } else if ((model[0] == 'S') || !strncasecmp (model, "LEICA S", 7)) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S; if (imgdata.lens.makernotes.Lens[0]) imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S; } } } else if ( ((tag == 0x0313) || (tag == 0x34003406)) && (fabs(imgdata.lens.makernotes.CurAp) < 0.17f) && ((type == 10) || (type == 5)) ) { imgdata.lens.makernotes.CurAp = getreal(type); if (imgdata.lens.makernotes.CurAp > 126.3) imgdata.lens.makernotes.CurAp = 0.0f; } else if (tag == 0x3400) { parse_makernote (base, 0x3400); } } else if (!strncmp(make, "NIKON",5)) { if (tag == 0x0082) // lens attachment { fread(imgdata.lens.makernotes.Attachment, len, 1, ifp); } else if (tag == 0x0083) // lens type { imgdata.lens.nikon.NikonLensType = fgetc(ifp); if (!(imgdata.lens.nikon.NikonLensType & 0x01)) { imgdata.lens.makernotes.LensFeatures_pre[0] = 'A'; imgdata.lens.makernotes.LensFeatures_pre[1] = 'F'; } if (imgdata.lens.nikon.NikonLensType & 0x02) { if (imgdata.lens.nikon.NikonLensType & 0x04) 
imgdata.lens.makernotes.LensFeatures_suf[0] = 'G'; else imgdata.lens.makernotes.LensFeatures_suf[0] = 'D'; imgdata.lens.makernotes.LensFeatures_suf[1] = ' '; } if (imgdata.lens.nikon.NikonLensType & 0x08) { imgdata.lens.makernotes.LensFeatures_suf[2] = 'V'; imgdata.lens.makernotes.LensFeatures_suf[3] = 'R'; } if (imgdata.lens.nikon.NikonLensType & 0x10) { imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_CX; } if (imgdata.lens.nikon.NikonLensType & 0x20) { strcpy(imgdata.lens.makernotes.Adapter, "FT-1"); } imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf; } else if (tag == 0x0084) // lens { imgdata.lens.makernotes.MinFocal = getreal(type); imgdata.lens.makernotes.MaxFocal = getreal(type); imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); } else if (tag == 0x008b) // lens f-stops { uchar a, b, c; a = fgetc(ifp); b = fgetc(ifp); c = fgetc(ifp); if (c) { imgdata.lens.nikon.NikonLensFStops = a*b*(12/c); imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops /12.0f; } } else if (tag == 0x0098) // contains lens data { for (i = 0; i < 4; i++) { NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0'; } switch (NikonLensDataVersion) { case 100: lenNikonLensData = 9; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break; case 101: case 201: // encrypted, starting from v.201 case 202: case 203: lenNikonLensData = 15; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break; case 204: lenNikonLensData = 16; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break; case 400: lenNikonLensData = 459; break; case 401: lenNikonLensData = 590; break; case 402: lenNikonLensData = 509; break; case 403: lenNikonLensData = 879; break; } table_buf = (uchar*)malloc(lenNikonLensData); fread(table_buf, lenNikonLensData, 1, ifp); if ((NikonLensDataVersion < 201) && lenNikonLensData) { processNikonLensData(table_buf, lenNikonLensData); 
lenNikonLensData = 0; } } } else if (!strncmp(make, "OLYMPUS", 7)) { switch (tag) { case 0x0207: case 0x20100100: { uchar sOlyID[7]; long unsigned OlyID; fread (sOlyID, len, 1, ifp); OlyID = sOlyID[0]; i = 1; while (sOlyID[i]) { OlyID = OlyID << 8 | sOlyID[i]; i++; } setOlympusBodyFeatures(OlyID); } break; case 0x1002: imgdata.lens.makernotes.CurAp = powf64(2.0f, getreal(type)/2); break; case 0x20100201: imgdata.lens.makernotes.LensID = (unsigned long long)fgetc(ifp)<<16 | (unsigned long long)(fgetc(ifp), fgetc(ifp))<<8 | (unsigned long long)fgetc(ifp); imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT; imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT; if (((imgdata.lens.makernotes.LensID < 0x20000) || (imgdata.lens.makernotes.LensID > 0x4ffff)) && (imgdata.lens.makernotes.LensID & 0x10)) { imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT; } break; case 0x20100203: fread(imgdata.lens.makernotes.Lens, len, 1, ifp); break; case 0x20100205: imgdata.lens.makernotes.MaxAp4MinFocal = powf64(sqrt(2.0f), get2() / 256.0f); break; case 0x20100206: imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(sqrt(2.0f), get2() / 256.0f); break; case 0x20100207: imgdata.lens.makernotes.MinFocal = (float)get2(); break; case 0x20100208: imgdata.lens.makernotes.MaxFocal = (float)get2(); if (imgdata.lens.makernotes.MaxFocal > 1000.0f) imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal; break; case 0x2010020a: imgdata.lens.makernotes.MaxAp4CurFocal = powf64(sqrt(2.0f), get2() / 256.0f); break; case 0x20100301: imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8; fgetc(ifp); imgdata.lens.makernotes.TeleconverterID = imgdata.lens.makernotes.TeleconverterID | fgetc(ifp); break; case 0x20100303: fread(imgdata.lens.makernotes.Teleconverter, len, 1, ifp); break; case 0x20100403: fread(imgdata.lens.makernotes.Attachment, len, 1, ifp); break; } } else if (!strncmp(make, "PENTAX", 6) && !strncmp(model, "GR", 2)) { if ((tag == 0x1001) && (type == 3)) { 
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; imgdata.lens.makernotes.LensID = -1; imgdata.lens.makernotes.FocalType = 1; } else if ((tag == 0x1017) && (get2() == 2)) { strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter"); } else if (tag == 0x1500) { imgdata.lens.makernotes.CurFocal = getreal(type); } } else if (!strncmp(make, "RICOH", 5) && strncmp(model, "PENTAX", 6)) { if ((tag == 0x1017) && (get2() == 2)) { strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter"); } else if (tag == 0x1500) { imgdata.lens.makernotes.CurFocal = getreal(type); } else if (tag == 0x2001) { short ntags, cur_tag; fseek(ifp, 20, SEEK_CUR); ntags = get2(); cur_tag = get2(); while (cur_tag != 0x002c) { fseek(ifp, 10, SEEK_CUR); cur_tag = get2(); } fseek(ifp, 6, SEEK_CUR); fseek(ifp, get4()+34, SEEK_SET); imgdata.lens.makernotes.LensID = getc(ifp) - '0'; switch(imgdata.lens.makernotes.LensID) { case 1: case 2: case 3: case 5: case 6: imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule; break; case 8: imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M; imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC; imgdata.lens.makernotes.LensID = -1; break; default: imgdata.lens.makernotes.LensID = -1; } } } else if (!strncmp(make, "PENTAX", 6) || !strncmp(model, "PENTAX", 6) || (!strncmp(make, "SAMSUNG", 7) && dng_version) && strncmp(model, "GR", 2)) { if (tag == 0x0005) { unique_id = get4(); setPentaxBodyFeatures(unique_id); } else if (tag == 0x0013) { imgdata.lens.makernotes.CurAp = (float)get2()/10.0f; } else if (tag == 0x001d) { imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f; } else if (tag == 0x003f) { imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp); } else if (tag == 0x0207) { ushort iLensData = 0; table_buf = (uchar*)malloc(len); 
fread(table_buf, len, 1, ifp); if ((imgdata.lens.makernotes.CamID < 0x12b9c) || ((imgdata.lens.makernotes.CamID == 0x12b9c) || // K100D (imgdata.lens.makernotes.CamID == 0x12b9d) || // K110D (imgdata.lens.makernotes.CamID == 0x12ba2) && // K100D Super (!table_buf[20] || (table_buf[20] == 0xff)))) { iLensData = 3; if (imgdata.lens.makernotes.LensID == -1) imgdata.lens.makernotes.LensID = (((unsigned)table_buf[0]) << 8) + table_buf[1]; } else switch (len) { case 90: // LensInfo3 iLensData = 13; if (imgdata.lens.makernotes.LensID == -1) imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4]; break; case 91: // LensInfo4 iLensData = 12; if (imgdata.lens.makernotes.LensID == -1) imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4]; break; case 80: // LensInfo5 case 128: iLensData = 15; if (imgdata.lens.makernotes.LensID == -1) imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5]; break; default: if (imgdata.lens.makernotes.CamID >= 0x12b9c) // LensInfo2 { iLensData = 4; if (imgdata.lens.makernotes.LensID == -1) imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3]; } } if (iLensData) { if (table_buf[iLensData+9] && (fabs(imgdata.lens.makernotes.CurFocal) < 0.1f)) imgdata.lens.makernotes.CurFocal = 10*(table_buf[iLensData+9]>>2) * powf64(4, (table_buf[iLensData+9] & 0x03)-2); if (table_buf[iLensData+10] & 0xf0) imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f); if (table_buf[iLensData+10] & 0x0f) imgdata.lens.makernotes.MinAp4CurFocal = powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f); if ( (imgdata.lens.makernotes.CamID != 0x12e6c) && // K-r (imgdata.lens.makernotes.CamID != 0x12e76) && // K-5 (imgdata.lens.makernotes.CamID != 0x12f70) // K-5 II // (imgdata.lens.makernotes.CamID != 0x12f71) // K-5 II s ) 
{ switch (table_buf[iLensData] & 0x06) { case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break; case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break; case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break; case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break; } if (table_buf[iLensData] & 0x70) imgdata.lens.makernotes.LensFStops = ((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f; if ((table_buf[iLensData+14] > 1) && (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f)) imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f); } else if ((imgdata.lens.makernotes.CamID != 0x12e76) && // K-5 (table_buf[iLensData+15] > 1) && (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f)) { imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f); } } free(table_buf); } else if (tag == 0x0239) // Q-series lens info (LensInfoQ) { char LensInfo [20]; fseek (ifp, 2, SEEK_CUR); fread(imgdata.lens.makernotes.Lens, 30, 1, ifp); strcat(imgdata.lens.makernotes.Lens, " "); fread(LensInfo, 20, 1, ifp); strcat(imgdata.lens.makernotes.Lens, LensInfo); } } else if (!strncmp(make, "SAMSUNG", 7)) { if (tag == 0x0002) { if(get4() == 0x2000) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX; } else if (!strncmp(model, "NX mini", 7)) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M; } else { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; } } else if (tag == 0x0003) { unique_id = imgdata.lens.makernotes.CamID = get4(); } else if (tag == 0xa003) { imgdata.lens.makernotes.LensID = get2(); if (imgdata.lens.makernotes.LensID) imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX; } else if (tag == 0xa019) { imgdata.lens.makernotes.CurAp = getreal(type); } else if (tag == 0xa01a) { imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 
10.0f; if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f) imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f; } } else if (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "Konica", 6) || !strncasecmp(make, "Minolta", 7) || (!strncasecmp(make, "Hasselblad", 10) && (!strncasecmp(model, "Stellar", 7) || !strncasecmp(model, "Lunar", 5) || !strncasecmp(model, "HV",2)))) { ushort lid; if (tag == 0xb001) // Sony ModelID { unique_id = get2(); setSonyBodyFeatures(unique_id); if (table_buf_0x9050_present) { process_Sony_0x9050(table_buf_0x9050, unique_id); free (table_buf_0x9050); table_buf_0x9050_present = 0; } if (table_buf_0x940c_present) { if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E) { process_Sony_0x940c(table_buf_0x940c); } free (table_buf_0x940c); table_buf_0x940c_present = 0; } } else if ((tag == 0x0010) && // CameraInfo strncasecmp(model, "DSLR-A100", 9) && strncasecmp(model, "NEX-5C", 6) && !strncasecmp(make, "SONY", 4) && ((len == 368) || // a700 (len == 5478) || // a850, a900 (len == 5506) || // a200, a300, a350 (len == 6118) || // a230, a290, a330, a380, a390 // a450, a500, a550, a560, a580 // a33, a35, a55 // NEX3, NEX5, NEX5C, NEXC3, VG10E (len == 15360)) ) { table_buf = (uchar*)malloc(len); fread(table_buf, len, 1, ifp); if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) && memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8)) { switch (len) { case 368: case 5478: // a700, a850, a900: CameraInfo if (table_buf[0] | table_buf[3]) imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]); if (table_buf[2] | table_buf[5]) imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]); if (table_buf[4]) imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f; if (table_buf[4]) imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f; parseSonyLensFeatures(table_buf[1], table_buf[6]); break; default: // CameraInfo2 & 3 if 
(table_buf[1] | table_buf[2]) imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]); if (table_buf[3] | table_buf[4]) imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]); if (table_buf[5]) imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f; if (table_buf[6]) imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f; parseSonyLensFeatures(table_buf[0], table_buf[7]); } } free(table_buf); } else if (tag == 0x0105) // Teleconverter { imgdata.lens.makernotes.TeleconverterID = get2(); } else if (tag == 0x0114) // CameraSettings { table_buf = (uchar*)malloc(len); fread(table_buf, len, 1, ifp); switch (len) { case 280: case 364: case 332: // CameraSettings and CameraSettings2 are big endian if (table_buf[2] | table_buf[3]) { lid = (((ushort)table_buf[2])<<8) | ((ushort)table_buf[3]); imgdata.lens.makernotes.CurAp = powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f); } break; case 1536: case 2048: // CameraSettings3 are little endian parseSonyLensType2(table_buf[1016], table_buf[1015]); if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) { switch (table_buf[153]) { case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break; case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break; } } break; } free(table_buf); } else if (tag == 0x9050) // little endian { table_buf_0x9050 = (uchar*)malloc(len); table_buf_0x9050_present = 1; fread(table_buf_0x9050, len, 1, ifp); if (imgdata.lens.makernotes.CamID) { process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID); free (table_buf_0x9050); table_buf_0x9050_present = 0; } } else if (tag == 0x940c) { table_buf_0x940c = (uchar*)malloc(len); table_buf_0x940c_present = 1; fread(table_buf_0x940c, len, 1, ifp); if ((imgdata.lens.makernotes.CamID) && (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)) { process_Sony_0x940c(table_buf_0x940c); free(table_buf_0x940c); 
table_buf_0x940c_present = 0; } } else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1)) { imgdata.lens.makernotes.LensID = get4(); if ((imgdata.lens.makernotes.LensID > 61184) && (imgdata.lens.makernotes.LensID < 65535)) { imgdata.lens.makernotes.LensID -= 61184; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF; } if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A; } else if (tag == 0xb02a) // Sony LensSpec { table_buf = (uchar*)malloc(len); fread(table_buf, len, 1, ifp); if (table_buf[1] | table_buf[2]) imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]); if (table_buf[3] | table_buf[4]) imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]); if (table_buf[5]) imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f; if (table_buf[6]) imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f; parseSonyLensFeatures(table_buf[0], table_buf[7]); free(table_buf); } } #endif if (tag == 2 && strstr(make,"NIKON") && !iso_speed) iso_speed = (get2(),get2()); if (tag == 37 && strstr(make,"NIKON") && (!iso_speed || iso_speed == 65535)) { unsigned char cc; fread(&cc,1,1,ifp); iso_speed = int(100.0 * powf64(2.0f,float(cc)/12.0-5.0)); } if (tag == 4 && len > 26 && len < 35) { if ((i=(get4(),get2())) != 0x7fff && (!iso_speed || iso_speed == 65535)) iso_speed = 50 * powf64(2.0, i/32.0 - 4); if ((i=(get2(),get2())) != 0x7fff && !aperture) aperture = powf64(2.0, i/64.0); if ((i=get2()) != 0xffff && !shutter) shutter = powf64(2.0, (short) i/-32.0); wbi = (get2(),get2()); shot_order = (get2(),get2()); } if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) { fseek (ifp, tag == 4 ? 
140:160, SEEK_CUR); switch (get2()) { case 72: flip = 0; break; case 76: flip = 6; break; case 82: flip = 5; break; } } if (tag == 7 && type == 2 && len > 20) fgets (model2, 64, ifp); if (tag == 8 && type == 4) shot_order = get4(); if (tag == 9 && !strcmp(make,"Canon")) fread (artist, 64, 1, ifp); if (tag == 0xc && len == 4) FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type); if (tag == 0xd && type == 7 && get2() == 0xaaaa) { for (c=i=2; (ushort) c != 0xbbbb && i < len; i++) c = c << 8 | fgetc(ifp); while ((i+=4) < len-5) if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3) flip = "065"[c]-'0'; } if (tag == 0x10 && type == 4) { unique_id = get4(); #ifdef LIBRAW_LIBRARY_BUILD setCanonBodyFeatures(unique_id); if (lenCanonCameraInfo) processCanonCameraInfo(unique_id, CanonCameraInfo); #endif } #ifdef LIBRAW_LIBRARY_BUILD if(tag == 0x20400805 && len == 2 && !strncasecmp(make,"Olympus",7)) { imgdata.color.OlympusSensorCalibration[0]=getreal(type); imgdata.color.OlympusSensorCalibration[1]=getreal(type); } if (tag == 0x4001 && len > 500 && !strcasecmp(make,"Canon")) { long int save1 = ftell(ifp); switch (len) { case 582: imgdata.color.canon_makernotes.CanonColorDataVer = 1; // 20D / 350D break; case 653: imgdata.color.canon_makernotes.CanonColorDataVer = 2; // 1Dmk2 / 1DsMK2 break; case 796: imgdata.color.canon_makernotes.CanonColorDataVer = 3; // 1DmkIIN / 5D / 30D / 400D // 1DmkIII / 1DSmkIII / 1DmkIV / 5DmkII // 7D / 40D / 50D / 60D / 450D / 500D // 550D / 1000D / 1100D case 674: case 692: case 702: case 1227: case 1250: case 1251: case 1337: case 1338: case 1346: imgdata.color.canon_makernotes.CanonColorDataVer = 4; imgdata.color.canon_makernotes.CanonColorDataSubVer = get2(); { fseek (ifp, save1+(0x0e7<<1), SEEK_SET); // offset 231 short int bls=0; FORC4 bls+=get2(); imgdata.color.canon_makernotes.AverageBlackLevel = bls/4; } if ((imgdata.color.canon_makernotes.CanonColorDataSubVer == 4) || (imgdata.color.canon_makernotes.CanonColorDataSubVer == 5)) { 
fseek (ifp, save1+(0x2b9<<1), SEEK_SET); // offset 697 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); } else if ((imgdata.color.canon_makernotes.CanonColorDataSubVer == 6) || (imgdata.color.canon_makernotes.CanonColorDataSubVer == 7)) { fseek (ifp, save1+(0x2d0<<1), SEEK_SET); // offset 720 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); } else if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 9) { fseek (ifp, save1+(0x2d4<<1), SEEK_SET); // offset 724 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); } break; case 5120: imgdata.color.canon_makernotes.CanonColorDataVer = 5; // PowerSot G10 break; case 1273: case 1275: imgdata.color.canon_makernotes.CanonColorDataVer = 6; // 600D / 1200D imgdata.color.canon_makernotes.CanonColorDataSubVer = get2(); { fseek (ifp, save1+(0x0fb<<1), SEEK_SET); // offset 251 short int bls=0; FORC4 bls+=get2(); imgdata.color.canon_makernotes.AverageBlackLevel = bls/4; } fseek (ifp, save1+(0x1e4<<1), SEEK_SET); // offset 484 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); break; // 1DX / 5DmkIII / 6D / 100D / 650D / 700D / M / 7DmkII / 750D / 760D case 1312: case 1313: case 1316: case 1506: imgdata.color.canon_makernotes.CanonColorDataVer = 7; imgdata.color.canon_makernotes.CanonColorDataSubVer = get2(); { fseek (ifp, save1+(0x114<<1), SEEK_SET); // offset 276 shorts int bls=0; FORC4 bls+=get2(); imgdata.color.canon_makernotes.AverageBlackLevel = bls/4; } if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 10) { fseek (ifp, save1+(0x1fd<<1), SEEK_SET); // offset 509 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); } else if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 11) { fseek (ifp, save1+(0x2dd<<1), SEEK_SET); // offset 733 shorts imgdata.color.canon_makernotes.SpecularWhiteLevel = get2(); } break; } fseek (ifp, save1, SEEK_SET); } #endif if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) { fseek (ifp, 
get4()+base, SEEK_SET); parse_tiff_ifd (base); } if (tag == 0x14 && type == 7) { if (len == 2560) { fseek (ifp, 1248, SEEK_CUR); goto get2_256; } fread (buf, 1, 10, ifp); if (!strncmp(buf,"NRW ",4)) { fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR); cam_mul[0] = get4() << 2; cam_mul[1] = get4() + get4(); cam_mul[2] = get4() << 2; } } if (tag == 0x15 && type == 2 && is_raw) fread (model, 64, 1, ifp); if (strstr(make,"PENTAX")) { if (tag == 0x1b) tag = 0x1018; if (tag == 0x1c) tag = 0x1017; } if (tag == 0x1d) while ((c = fgetc(ifp)) && c != EOF) serial = serial*10 + (isdigit(c) ? c - '0' : c % 10); if (tag == 0x29 && type == 1) { // Canon PowerShot G9 c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0; fseek (ifp, 8 + c*32, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4(); } #ifndef LIBRAW_LIBRARY_BUILD // works for some files, but not all if (tag == 0x3d && type == 3 && len == 4) FORC4 cblack[c ^ c >> 1] = get2() >> (14-tiff_ifd[2].bps); #endif if (tag == 0x81 && type == 4) { data_offset = get4(); fseek (ifp, data_offset + 41, SEEK_SET); raw_height = get2() * 2; raw_width = get2(); filters = 0x61616161; } if ((tag == 0x81 && type == 7) || (tag == 0x100 && type == 7) || (tag == 0x280 && type == 1)) { thumb_offset = ftell(ifp); thumb_length = len; } if (tag == 0x88 && type == 4 && (thumb_offset = get4())) thumb_offset += base; if (tag == 0x89 && type == 4) thumb_length = get4(); if (tag == 0x8c || tag == 0x96) meta_offset = ftell(ifp); if (tag == 0x97) { for (i=0; i < 4; i++) ver97 = ver97 * 10 + fgetc(ifp)-'0'; switch (ver97) { case 100: fseek (ifp, 68, SEEK_CUR); FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2(); break; case 102: fseek (ifp, 6, SEEK_CUR); goto get2_rggb; case 103: fseek (ifp, 16, SEEK_CUR); FORC4 cam_mul[c] = get2(); } if (ver97 >= 200) { if (ver97 != 205) fseek (ifp, 280, SEEK_CUR); fread (buf97, 324, 1, ifp); } } if (tag == 0xa1 && type == 7) { order = 0x4949; fseek (ifp, 140, SEEK_CUR); FORC3 cam_mul[c] = get4(); } if (tag == 0xa4 && type 
== 3) { fseek (ifp, wbi*48, SEEK_CUR); FORC3 cam_mul[c] = get2(); } if (tag == 0xa7) { // shutter count NikonKey = fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp); if ( (unsigned) (ver97-200) < 17) { ci = xlat[0][serial & 0xff]; cj = xlat[1][NikonKey]; ck = 0x60; for (i=0; i < 324; i++) buf97[i] ^= (cj += ci * ck++); i = "66666>666;6A;:;55"[ver97-200] - '0'; FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2 (buf97 + (i & -2) + c*2); } #ifdef LIBRAW_LIBRARY_BUILD if ((NikonLensDataVersion > 200) && lenNikonLensData) { ci = xlat[0][serial & 0xff]; cj = xlat[1][NikonKey]; ck = 0x60; for (i = 0; i < lenNikonLensData; i++) table_buf[i] ^= (cj += ci * ck++); processNikonLensData(table_buf, lenNikonLensData); lenNikonLensData = 0; } #endif } if(tag == 0xb001 && type == 3) // Sony ModelID { unique_id = get2(); } if (tag == 0x200 && len == 3) shot_order = (get4(),get4()); if (tag == 0x200 && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x201 && len == 4) goto get2_rggb; if (tag == 0x220 && type == 7) meta_offset = ftell(ifp); if (tag == 0x401 && type == 4 && len == 4) FORC4 cblack[c ^ c >> 1] = get4(); #ifdef LIBRAW_LIBRARY_BUILD // not corrected for file bitcount, to be patched in open_datastream if (tag == 0x03d && strstr(make,"NIKON") && len == 4) { FORC4 cblack[c ^ c >> 1] = get2(); i = cblack[3]; FORC3 if(i>cblack[c]) i = cblack[c]; FORC4 cblack[c]-=i; black += i; } #endif if (tag == 0xe01) { /* Nikon Capture Note */ order = 0x4949; fseek (ifp, 22, SEEK_CUR); for (offset=22; offset+22 < len; offset += 22+i) { tag = get4(); fseek (ifp, 14, SEEK_CUR); i = get4()-4; if (tag == 0x76a43207) flip = get2(); else fseek (ifp, i, SEEK_CUR); } } if (tag == 0xe80 && len == 256 && type == 7) { fseek (ifp, 48, SEEK_CUR); cam_mul[0] = get2() * 508 * 1.078 / 0x10000; cam_mul[2] = get2() * 382 * 1.173 / 0x10000; } if (tag == 0xf00 && type == 7) { if (len == 614) fseek (ifp, 176, SEEK_CUR); else if (len == 734 || len == 1502) fseek (ifp, 148, SEEK_CUR); else goto next; goto get2_256; 
} if ((tag == 0x1011 && len == 9) || tag == 0x20400200) { if(!strncasecmp(make,"Olympus", 7)) { int j,k; for (i=0; i < 3; i++) FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0; for (i=0; i < 3; i++) for (j=0; j < 3; j++) for (cmatrix[i][j] = k=0; k < 3; k++) cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j]; } else for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 256.0; } if ((tag == 0x1012 || tag == 0x20400600) && len == 4) FORC4 cblack[c ^ c >> 1] = get2(); if (tag == 0x1017 || tag == 0x20400100) cam_mul[0] = get2() / 256.0; if (tag == 0x1018 || tag == 0x20400100) cam_mul[2] = get2() / 256.0; if (tag == 0x2011 && len == 2) { get2_256: order = 0x4d4d; cam_mul[0] = get2() / 256.0; cam_mul[2] = get2() / 256.0; } if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13)) fseek (ifp, get4()+base, SEEK_SET); if (tag == 0x2020) parse_thumb_note (base, 257, 258); if (tag == 0x2040) parse_makernote (base, 0x2040); #ifdef LIBRAW_LIBRARY_BUILD // IB start if (tag == 0x2010) { parse_makernote(base, 0x2010); } // IB end #endif if (tag == 0xb028) { fseek (ifp, get4()+base, SEEK_SET); parse_thumb_note (base, 136, 137); } if (tag == 0x4001 && len > 500) { i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126; fseek (ifp, i, SEEK_CUR); get2_rggb: FORC4 cam_mul[c ^ (c >> 1)] = get2(); i = len >> 3 == 164 || len == 1506 ? 112:22; fseek (ifp, i, SEEK_CUR); FORC4 sraw_mul[c ^ (c >> 1)] = get2(); } if(!strcasecmp(make,"Samsung")) { if (tag == 0xa020) // get the full Samsung encryption key for (i=0; i<11; i++) SamsungKey[i] = get4(); if (tag == 0xa021) // get and decode Samsung cam_mul array FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c]; if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0; if (tag == 0xa028) FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c]; } else { // Somebody else use 0xa021 and 0xa028? 
  /* --- tail of parse_makernote(): fallback handling when the maker is not
     Samsung -- some other vendors reuse tags 0xa021 (WB multipliers) and
     0xa028 (WB offsets); NOTE(review): exact vendors not visible here --- */
  if (tag == 0xa021)
    FORC4 cam_mul[c ^ (c >> 1)] = get4();
  if (tag == 0xa028)
    FORC4 cam_mul[c ^ (c >> 1)] -= get4();
  }
  /* tag 0x4021: if both following longs are non-zero, force flat WB */
  if (tag == 0x4021 && get4() && get4())
    FORC4 cam_mul[c] = 1024;
next:
  /* return to the position saved by tiff_get() for the next directory entry */
  fseek (ifp, save, SEEK_SET);
  }
quit:
  /* restore the byte order that was active when parse_makernote() started */
  order = sorder;
}

/*
   Since the TIFF DateTime string has no timezone information,
   assume that the camera's clock was set to Universal Time.
 */
/* Read a 19-byte "YYYY:MM:DD HH:MM:SS" TIFF DateTime string from ifp and
   store the resulting Unix time in the global `timestamp`.
   reversed != 0 means the 19 bytes are stored back-to-front in the file. */
void CLASS get_timestamp (int reversed)
{
  struct tm t;
  char str[20];
  int i;

  str[19] = 0;
  if (reversed)
    for (i=19; i--; ) str[i] = fgetc(ifp);   /* read bytes in reverse order */
  else
    fread (str, 19, 1, ifp);
  memset (&t, 0, sizeof t);
  /* require all six date/time fields; otherwise leave timestamp untouched */
  if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
	&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
    return;
  t.tm_year -= 1900;   /* struct tm counts years from 1900 */
  t.tm_mon -= 1;       /* and months from 0 */
  t.tm_isdst = -1;     /* let mktime() decide about DST */
  if (mktime(&t) > 0)
    timestamp = mktime(&t);
}

/* Walk one EXIF IFD at `base`, filling exposure, ISO, lens and related
   globals/imgdata fields from the tags it recognizes.  Recurses into the
   maker note via parse_makernote() (tag 37500). */
void CLASS parse_exif (int base)
{
  unsigned kodak, entries, tag, type, len, save, c;
  double expo,ape;

  /* Kodak EASTMAN files store raw dimensions in EXIF tags 40962/40963,
     but only trust that for the first couple of IFDs */
  kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
  entries = get2();
  /* guard against corrupt Hasselblad files with absurd entry counts */
  if(!strcmp(make,"Hasselblad") && (tiff_nifds > 3) && (entries > 512)) return;
//  printf("\n*** in parse_exif, make: =%s= model: =%s=", make, model);
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
//  printf("\n\ttag: %x", tag);
#ifdef LIBRAW_LIBRARY_BUILD
    /* let a user-installed callback observe every EXIF tag; file position
       is saved and restored so the callback cannot disturb parsing */
    if(callbacks.exif_cb)
      {
        int savepos = ftell(ifp);
        callbacks.exif_cb(callbacks.exifparser_data,tag,type,len,order,ifp);
        fseek(ifp,savepos,SEEK_SET);
      }
#endif
    switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
      case 0xa405: // FocalLengthIn35mmFormat
        imgdata.lens.FocalLengthIn35mmFormat = get2();
        break;
      case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
        imgdata.lens.MinFocal = getreal(type);
        imgdata.lens.MaxFocal = getreal(type);
        imgdata.lens.MaxAp4MinFocal = getreal(type);
        imgdata.lens.MaxAp4MaxFocal = getreal(type);
        break;
      case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
        imgdata.lens.dng.MinFocal = getreal(type);
        imgdata.lens.dng.MaxFocal = getreal(type);
        imgdata.lens.dng.MaxAp4MinFocal = getreal(type);
imgdata.lens.dng.MaxAp4MaxFocal = getreal(type); break; case 0xa433: // LensMake fread(imgdata.lens.LensMake, MIN(len,sizeof(imgdata.lens.LensMake)), 1, ifp); break; case 0xa434: // LensModel fread(imgdata.lens.Lens, MIN(len, sizeof(imgdata.lens.LensMake)), 1, ifp); if (!strncmp(imgdata.lens.Lens, "----", 4)) imgdata.lens.Lens[0] = 0; break; case 0x9205: imgdata.lens.EXIF_MaxAp = powf64(2.0f, (getreal(type) / 2.0f)); break; #endif case 33434: shutter = getreal(type); break; case 33437: aperture = getreal(type); break; case 34855: iso_speed = get2(); break; case 34866: if (iso_speed == 0xffff && (!strcasecmp(make, "SONY") || !strcasecmp(make, "CANON"))) iso_speed = getreal(type); break; case 36867: case 36868: get_timestamp(0); break; case 37377: if ((expo = -getreal(type)) < 128 && shutter == 0.) shutter = powf64(2.0, expo); break; case 37378: if (fabs(ape = getreal(type))<256.0) aperture = powf64(2.0, ape/2); break; case 37385: flash_used = getreal(type); break; case 37386: focal_len = getreal(type); break; case 37500: parse_makernote (base, 0); break; // tag 0x927c case 40962: if (kodak) raw_width = get4(); break; case 40963: if (kodak) raw_height = get4(); break; case 41730: if (get4() == 0x20002) for (exif_cfa=c=0; c < 8; c+=2) exif_cfa |= fgetc(ifp) * 0x01010101 << c; } fseek (ifp, save, SEEK_SET); } } #ifdef LIBRAW_LIBRARY_BUILD void CLASS parse_gps_libraw(int base) { unsigned entries, tag, type, len, save, c; entries = get2(); if (entries > 0) imgdata.other.parsed_gps.gpsparsed = 1; while (entries--) { tiff_get(base, &tag, &type, &len, &save); switch (tag) { case 1: imgdata.other.parsed_gps.latref = getc(ifp); break; case 3: imgdata.other.parsed_gps.longref = getc(ifp); break; case 5: imgdata.other.parsed_gps.altref = getc(ifp); break; case 2: if (len == 3) FORC(3) imgdata.other.parsed_gps.latitude[c] = getreal(type); break; case 4: if (len == 3) FORC(3) imgdata.other.parsed_gps.longtitude[c] = getreal(type); break; case 7: if (len == 3) FORC(3) 
            imgdata.other.parsed_gps.gpstimestamp[c] = getreal(type);
        break;
      case 6: // GPSAltitude
        imgdata.other.parsed_gps.altitude = getreal(type);
        break;
      case 9: // GPSStatus ('A' = measurement active, 'V' = void -- per EXIF GPS IFD)
        imgdata.other.parsed_gps.gpsstatus = getc(ifp);
        break;
    }
    fseek(ifp, save, SEEK_SET);
  }
}
#endif

/* Walk a GPS IFD at `base` and pack raw values into the flat gpsdata[]
   array (dcraw's classic representation, written to DNG output). */
void CLASS parse_gps (int base)
{
  unsigned entries, tag, type, len, save, c;

  entries = get2();
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    switch (tag) {
      case 1: case 3: case 5:          /* lat/long/alt reference characters */
	gpsdata[29+tag/2] = getc(ifp);			break;
      case 2: case 4: case 7:          /* lat/long/time: 3 rationals = 6 longs */
	FORC(6) gpsdata[tag/3*6+c] = get4();		break;
      case 6:                          /* altitude: 1 rational = 2 longs */
	FORC(2) gpsdata[18+c] = get4();			break;
      case 18: case 29:                /* map datum / date stamp strings */
	fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
    }
    fseek (ifp, save, SEEK_SET);
  }
}

/* Build cmatrix = rgb_romm * romm_cam, converting a ROMM(ProPhoto)-to-camera
   matrix into an sRGB-side color matrix.  Plain 3x3 matrix multiply. */
void CLASS romm_coeff (float romm_cam[3][3])
{
  static const float rgb_romm[3][3] =	/* ROMM == Kodak ProPhoto */
  { {  2.034193, -0.727420, -0.306766 },
    { -0.228811,  1.231729, -0.002922 },
    { -0.008565, -0.153273,  1.161839 } };
  int i, j, k;

  for (i=0; i < 3; i++)
    for (j=0; j < 3; j++)
      for (cmatrix[i][j] = k=0; k < 3; k++)
	cmatrix[i][j] += rgb_romm[i][k] * romm_cam[k][j];
#ifdef LIBRAW_LIBRARY_BUILD
  imgdata.color.digitalBack_color=1;
#endif
}

/* Recursively parse the "PKTS" (0x504b5453) tag/value blocks used by
   Leaf-style digital backs: thumbnail, ICC profile, color matrices,
   mosaic pattern, neutrals, rotation.  `offset` is the file position of
   the first block; recursion follows nested blocks. */
void CLASS parse_mos (int offset)
{
  char data[40];
  int skip, from, i, c, neut[4], planes=0, frot=0;
  /* back-type index -> model name lookup; empty strings are unused slots */
  static const char *mod[] =
  { "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
    "Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
    "Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
    "Aptus-II 7","","","Aptus-II 6","","","Aptus-II 10","Aptus-II 5",
    "","","","","Aptus-II 10R","Aptus-II 8","","Aptus-II 12","","AFi-II 12" };
  float romm_cam[3][3];

  fseek (ifp, offset, SEEK_SET);
  while (1) {
    if (get4() != 0x504b5453) break;   /* "PKTS" block magic */
    get4();
    fread (data, 1, 40, ifp);          /* 40-byte block name */
    skip = get4();                     /* payload length */
    from = ftell(ifp);                 /* payload start */
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
    if (!strcmp(data,"CameraObj_camera_type"))
      {
        fread(imgdata.lens.makernotes.body, skip, 1, ifp);
      }
#endif
// IB end
    if (!strcmp(data,"JPEG_preview_data")) {
      thumb_offset = from;
      thumb_length = skip;
    }
    if (!strcmp(data,"icc_camera_profile")) {
      profile_offset = from;
      profile_length = skip;
    }
    if (!strcmp(data,"ShootObj_back_type")) {
      fscanf (ifp, "%d", &i);
      /* map back-type index to a model name, if the index is in range */
      if ((unsigned) i < sizeof mod / sizeof (*mod))
	strcpy (model, mod[i]);
    }
    if (!strcmp(data,"icc_camera_to_tone_matrix")) {
      /* binary form: 9 IEEE floats stored as 32-bit ints */
      for (i=0; i < 9; i++)
	romm_cam[0][i] = int_to_float(get4());
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_color_matrix")) {
      /* text form: 9 floats */
      for (i=0; i < 9; i++)
	fscanf (ifp, "%f", &romm_cam[0][i]);
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_number_of_planes"))
      fscanf (ifp, "%d", &planes);
    if (!strcmp(data,"CaptProf_raw_data_rotation"))
      fscanf (ifp, "%d", &flip);
    if (!strcmp(data,"CaptProf_mosaic_pattern"))
      /* find which cell of the 2x2 mosaic holds value 1 -> filter rotation */
      FORC4 {
	fscanf (ifp, "%d", &i);
	if (i == 1) frot = c ^ (c >> 1);
      }
    if (!strcmp(data,"ImgProf_rotation_angle")) {
      fscanf (ifp, "%d", &i);
      flip = i - flip;
    }
    if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
      /* neut[0] is the reference; WB multipliers are ratios against it */
      FORC4 fscanf (ifp, "%d", neut+c);
      FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
    }
    if (!strcmp(data,"Rows_data"))
      load_flags = get4();
    parse_mos (from);                 /* recurse into nested blocks */
    fseek (ifp, skip+from, SEEK_SET); /* then skip past this payload */
  }
  /* single-plane backs are Bayer; pick the CFA pattern by net rotation */
  if (planes)
    filters = (planes == 1) * 0x01010101 *
	(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}

/* Read a linearization curve of `len` 16-bit entries into curve[],
   extend the last value to fill 0x10000 entries, and derive `maximum`. */
void CLASS linear_table (unsigned len)
{
  int i;
  if (len > 0x10000) len = 0x10000;    /* clamp to table capacity */
  read_shorts (curve, len);
  for (i=len; i < 0x10000; i++)        /* pad with the final value */
    curve[i] = curve[i-1];
  maximum = curve[len<0x1000?0xfff:len-1];
}

#ifdef LIBRAW_LIBRARY_BUILD
/* Thanks to Alexey Danilchenko for wb as-shot parsing code */
/* Walk a Kodak maker IFD at `base`: as-shot white balance, linearization
   table, ISO, and raw dimensions.  LibRaw build: also forwards every tag
   (flagged with 0x20000) to the user EXIF callback. */
void CLASS parse_kodak_ifd (int base)
{
  unsigned entries, tag, type, len, save;
  int i, c, wbi=-2;
  float mul[3]={1,1,1}, num;
  /* per-illuminant WB tags, indexed by wbi; -1 marks unused slots */
  static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };

  entries = get2();
  if (entries > 1024) return;          /* sanity guard on entry count */
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
    if(callbacks.exif_cb)
      {
        int savepos = ftell(ifp);
        callbacks.exif_cb(callbacks.exifparser_data,tag | 0x20000,type,len,order,ifp);
        fseek(ifp,savepos,SEEK_SET);
      }
#endif
    if (tag == 1020) wbi = getint(type);          /* illuminant index */
    if (tag == 1021 && len == 72) {		/* WB set in software */
      fseek (ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / get2();
      wbi = -2;                                   /* override illuminant WB */
    }
    if (tag == 2120 + wbi || (wbi<0 && tag == 2125)) /* use Auto WB if illuminant index is not set */
      {
        FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
        FORC3 cam_mul[c] = mul[1] / mul[c];          /* normalise against green */
      }
    if (tag == 2317) linear_table (len);
    if (tag == 0x903) iso_speed = getreal(type);
    //if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) wbi = fgetc(ifp);
    /* direct per-illuminant WB multipliers, if wbi selects a valid slot */
    if ((unsigned) wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 64019) width = getint(type);
    if (tag == 64020) height = (getint(type)+1) & -2;  /* round up to even */
    fseek (ifp, save, SEEK_SET);
  }
}
#else
/* Walk a Kodak maker IFD at `base` (classic dcraw build): as-shot white
   balance -- including the polynomial-in-color-temperature form of tags
   214x -- linearization table, ISO, and raw dimensions. */
void CLASS parse_kodak_ifd (int base)
{
  unsigned entries, tag, type, len, save;
  int i, c, wbi=-2, wbtemp=6500;
  float mul[3]={1,1,1}, num;
  /* per-illuminant WB tags, indexed by wbi; -1 marks unused slots */
  static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };

  entries = get2();
  if (entries > 1024) return;          /* sanity guard on entry count */
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == 1020) wbi = getint(type);          /* illuminant index */
    if (tag == 1021 && len == 72) {		/* WB set in software */
      fseek (ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / get2();
      wbi = -2;                                   /* override illuminant WB */
    }
    if (tag == 2118) wbtemp = getint(type);       /* WB color temperature */
    if (tag == 2120 + wbi && wbi >= 0)
      FORC3 cam_mul[c] = 2048.0 / getreal(type);
    if (tag == 2130 + wbi)
      FORC3 mul[c] = getreal(type);
    if (tag == 2140 + wbi && wbi >= 0)
      /* WB as a cubic polynomial in (wbtemp/100) */
      FORC3 {
	for (num=i=0; i < 4; i++)
	  num += getreal(type) * pow (wbtemp/100.0, i);
	cam_mul[c] = 2048 / (num * mul[c]);
      }
    if (tag == 2317) linear_table (len);
    if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) wbi = fgetc(ifp);
    /* direct per-illuminant WB multipliers, if wbi selects a valid slot */
    if ((unsigned) wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 64019) width = getint(type);
    if (tag == 64020) height = (getint(type)+1) & -2;  /* round up to even */
    fseek (ifp, save, SEEK_SET);
  }
}
#endif
//@end COMMON
void CLASS parse_minolta (int base);
int CLASS parse_tiff (int base);
//@out COMMON
int CLASS
parse_tiff_ifd (int base) { unsigned entries, tag, type, len, plen=16, save; int ifd, use_cm=0, cfa, i, j, c, ima_len=0; char *cbuf, *cp; uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256]; double cc[4][4], cm[4][3], cam_xyz[4][3], num; double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 }; unsigned sony_curve[] = { 0,0,0,0,0,4095 }; unsigned *buf, sony_offset=0, sony_length=0, sony_key=0; struct jhead jh; int pana_raw = 0; #ifndef LIBRAW_LIBRARY_BUILD FILE *sfp; #endif if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0]) return 1; ifd = tiff_nifds++; for (j=0; j < 4; j++) for (i=0; i < 4; i++) cc[j][i] = i == j; entries = get2(); if (entries > 512) return 1; while (entries--) { tiff_get (base, &tag, &type, &len, &save); // printf ("\n*** parse_tiff_ifd tag: 0x%04x", tag); #ifdef LIBRAW_LIBRARY_BUILD if(callbacks.exif_cb) { int savepos = ftell(ifp); callbacks.exif_cb(callbacks.exifparser_data,tag|(pana_raw?0x30000:0),type,len,order,ifp); fseek(ifp,savepos,SEEK_SET); } #endif switch (tag) { case 1: if(len==4) pana_raw = get4(); break; case 5: width = get2(); break; case 6: height = get2(); break; case 7: width += get2(); break; case 9: if ((i = get2())) filters = i; #ifdef LIBRAW_LIBRARY_BUILD if(pana_raw && len == 1 && type ==3) pana_black[3]+=i; #endif break; case 8: case 10: #ifdef LIBRAW_LIBRARY_BUILD if(pana_raw && len == 1 && type ==3) pana_black[3]+=get2(); #endif break; case 17: case 18: if (type == 3 && len == 1) cam_mul[(tag-17)*2] = get2() / 256.0; break; case 23: if (type == 3) iso_speed = get2(); break; case 28: case 29: case 30: #ifdef LIBRAW_LIBRARY_BUILD if(pana_raw && len == 1 && type ==3) { pana_black[tag-28] = get2(); } else #endif { cblack[tag-28] = get2(); cblack[3] = cblack[1]; } break; case 36: case 37: case 38: cam_mul[tag-36] = get2(); break; case 39: if (len < 50 || cam_mul[0]) break; fseek (ifp, 12, SEEK_CUR); FORC3 cam_mul[c] = get2(); break; case 46: if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break; 
thumb_offset = ftell(ifp) - 2; thumb_length = len; break; case 61440: /* Fuji HS10 table */ fseek (ifp, get4()+base, SEEK_SET); parse_tiff_ifd (base); break; case 2: case 256: case 61441: /* ImageWidth */ tiff_ifd[ifd].t_width = getint(type); break; case 3: case 257: case 61442: /* ImageHeight */ tiff_ifd[ifd].t_height = getint(type); break; case 258: /* BitsPerSample */ case 61443: tiff_ifd[ifd].samples = len & 7; tiff_ifd[ifd].bps = getint(type); break; case 61446: raw_height = 0; if (tiff_ifd[ifd].bps > 12) break; load_raw = &CLASS packed_load_raw; load_flags = get4() ? 24:80; break; case 259: /* Compression */ tiff_ifd[ifd].comp = getint(type); break; case 262: /* PhotometricInterpretation */ tiff_ifd[ifd].phint = get2(); break; case 270: /* ImageDescription */ fread (desc, 512, 1, ifp); break; case 271: /* Make */ fgets (make, 64, ifp); break; case 272: /* Model */ fgets (model, 64, ifp); break; case 280: /* Panasonic RW2 offset */ if (type != 4) break; load_raw = &CLASS panasonic_load_raw; load_flags = 0x2008; case 273: /* StripOffset */ case 513: /* JpegIFOffset */ case 61447: tiff_ifd[ifd].offset = get4()+base; if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) { fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { tiff_ifd[ifd].comp = 6; tiff_ifd[ifd].t_width = jh.wide; tiff_ifd[ifd].t_height = jh.high; tiff_ifd[ifd].bps = jh.bits; tiff_ifd[ifd].samples = jh.clrs; if (!(jh.sraw || (jh.clrs & 1))) tiff_ifd[ifd].t_width *= jh.clrs; i = order; parse_tiff (tiff_ifd[ifd].offset + 12); order = i; } } break; case 274: /* Orientation */ tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0'; break; case 277: /* SamplesPerPixel */ tiff_ifd[ifd].samples = getint(type) & 7; break; case 279: /* StripByteCounts */ case 514: case 61448: tiff_ifd[ifd].bytes = get4(); break; case 61454: FORC3 cam_mul[(4-c) % 3] = getint(type); break; case 305: case 11: /* Software */ fgets (software, 64, ifp); if (!strncmp(software,"Adobe",5) || 
!strncmp(software,"dcraw",5) || !strncmp(software,"UFRaw",5) || !strncmp(software,"Bibble",6) || !strcmp (software,"Digital Photo Professional")) is_raw = 0; break; case 306: /* DateTime */ get_timestamp(0); break; case 315: /* Artist */ fread (artist, 64, 1, ifp); break; case 322: /* TileWidth */ tiff_ifd[ifd].t_tile_width = getint(type); break; case 323: /* TileLength */ tiff_ifd[ifd].t_tile_length = getint(type); break; case 324: /* TileOffsets */ tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4(); if (len == 1) tiff_ifd[ifd].t_tile_width = tiff_ifd[ifd].t_tile_length = 0; if (len == 4) { load_raw = &CLASS sinar_4shot_load_raw; is_raw = 5; } break; #ifdef LIBRAW_LIBRARY_BUILD case 325: /* TileByteCount */ tiff_ifd[ifd].tile_maxbytes = 0; for(int jj=0;jj<len;jj++) { int s = get4(); if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s; } break; #endif case 330: /* SubIFDs */ if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) { load_raw = &CLASS sony_arw_load_raw; data_offset = get4()+base; ifd++; break; } #ifdef LIBRAW_LIBRARY_BUILD if (!strcmp(make,"Hasselblad") && libraw_internal_data.unpacker_data.hasselblad_parser_flag) { fseek (ifp, ftell(ifp)+4, SEEK_SET); fseek (ifp, get4()+base, SEEK_SET); parse_tiff_ifd (base); break; } #endif if(len > 1000) len=1000; /* 1000 SubIFDs is enough */ while (len--) { i = ftell(ifp); fseek (ifp, get4()+base, SEEK_SET); if (parse_tiff_ifd (base)) break; fseek (ifp, i+4, SEEK_SET); } break; case 400: strcpy (make, "Sarnoff"); maximum = 0xfff; break; #ifdef LIBRAW_LIBRARY_BUILD case 700: if((type == 1 || type == 2 || type == 6 || type == 7) && len > 1 && len < 5100000) { xmpdata = (char*)malloc(xmplen = len+1); fread(xmpdata,len,1,ifp); xmpdata[len]=0; } break; #endif case 28688: FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff; for (i=0; i < 5; i++) for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++) curve[j] = curve[j-1] + (1 << i); break; case 29184: sony_offset = get4(); break; case 29185: sony_length 
= get4(); break; case 29217: sony_key = get4(); break; case 29264: parse_minolta (ftell(ifp)); raw_width = 0; break; case 29443: FORC4 cam_mul[c ^ (c < 2)] = get2(); break; case 29459: FORC4 cam_mul[c] = get2(); i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1; SWAP (cam_mul[i],cam_mul[i+1]) break; case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800 for (i=0; i < 3; i++) FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]); #endif break; case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, no more needs to be divided by 4 FORC4 cblack[c ^ c >> 1] = get2(); i = cblack[3]; FORC3 if(i>cblack[c]) i = cblack[c]; FORC4 cblack[c]-=i; black = i; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]); #endif break; case 33405: /* Model2 */ fgets (model2, 64, ifp); break; case 33421: /* CFARepeatPatternDim */ if (get2() == 6 && get2() == 6) filters = 9; break; case 33422: /* CFAPattern */ if (filters == 9) { FORC(36) xtrans[0][c] = fgetc(ifp) & 3; break; } case 64777: /* Kodak P-series */ if(len == 36) { filters = 9; colors = 3; FORC(36) xtrans[0][c] = fgetc(ifp) & 3; } else { if ((plen=len) > 16) plen = 16; fread (cfa_pat, 1, plen, ifp); for (colors=cfa=i=0; i < plen && colors < 4; i++) { colors += !(cfa & (1 << cfa_pat[i])); cfa |= 1 << cfa_pat[i]; } if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3); /* CMY */ if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4); /* GMCY */ goto guess_cfa_pc; } break; case 33424: case 65024: fseek (ifp, get4()+base, SEEK_SET); parse_kodak_ifd (base); break; case 33434: /* ExposureTime */ shutter = getreal(type); break; case 33437: /* FNumber */ aperture = getreal(type); break; #ifdef LIBRAW_LIBRARY_BUILD // IB start 
case 0xa405: // FocalLengthIn35mmFormat imgdata.lens.FocalLengthIn35mmFormat = get2(); break; case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard imgdata.lens.MinFocal = getreal(type); imgdata.lens.MaxFocal = getreal(type); imgdata.lens.MaxAp4MinFocal = getreal(type); imgdata.lens.MaxAp4MaxFocal = getreal(type); break; case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard imgdata.lens.MinFocal = getreal(type); imgdata.lens.MaxFocal = getreal(type); imgdata.lens.MaxAp4MinFocal = getreal(type); imgdata.lens.MaxAp4MaxFocal = getreal(type); break; case 0xa433: // LensMake fread(imgdata.lens.LensMake, MIN(len, sizeof(imgdata.lens.LensMake)), 1, ifp); break; case 0xa434: // LensModel fread(imgdata.lens.Lens, MIN(len, sizeof(imgdata.lens.Lens)), 1, ifp); if (!strncmp(imgdata.lens.Lens, "----", 4)) imgdata.lens.Lens[0] = 0; break; case 0x9205: imgdata.lens.EXIF_MaxAp = powf64(2.0f, (getreal(type) / 2.0f)); break; // IB end #endif case 34306: /* Leaf white balance */ FORC4 cam_mul[c ^ 1] = 4096.0 / get2(); break; case 34307: /* Leaf CatchLight color matrix */ fread (software, 1, 7, ifp); if (strncmp(software,"MATRIX",6)) break; colors = 4; for (raw_color = i=0; i < 3; i++) { FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]); if (!use_camera_wb) continue; num = 0; FORC4 num += rgb_cam[i][c]; FORC4 rgb_cam[i][c] /= num; } break; case 34310: /* Leaf metadata */ parse_mos (ftell(ifp)); case 34303: strcpy (make, "Leaf"); break; case 34665: /* EXIF tag */ fseek (ifp, get4()+base, SEEK_SET); parse_exif (base); break; case 34853: /* GPSInfo tag */ { unsigned pos; fseek(ifp, pos = (get4() + base), SEEK_SET); parse_gps(base); #ifdef LIBRAW_LIBRARY_BUILD fseek(ifp, pos, SEEK_SET); parse_gps_libraw(base); #endif } break; case 34675: /* InterColorProfile */ case 50831: /* AsShotICCProfile */ profile_offset = ftell(ifp); profile_length = len; break; case 37122: /* CompressedBitsPerPixel */ kodak_cbpp = get4(); break; case 37386: /* FocalLength */ focal_len = 
getreal(type); break; case 37393: /* ImageNumber */ shot_order = getint(type); break; case 37400: /* old Kodak KDC tag */ for (raw_color = i=0; i < 3; i++) { getreal(type); FORC3 rgb_cam[i][c] = getreal(type); } break; case 40976: strip_offset = get4(); switch (tiff_ifd[ifd].comp) { case 32770: load_raw = &CLASS samsung_load_raw; break; case 32772: load_raw = &CLASS samsung2_load_raw; break; case 32773: load_raw = &CLASS samsung3_load_raw; break; } break; case 46275: /* Imacon tags */ strcpy (make, "Imacon"); data_offset = ftell(ifp); ima_len = len; break; case 46279: if (!ima_len) break; fseek (ifp, 38, SEEK_CUR); case 46274: fseek (ifp, 40, SEEK_CUR); raw_width = get4(); raw_height = get4(); left_margin = get4() & 7; width = raw_width - left_margin - (get4() & 7); top_margin = get4() & 7; height = raw_height - top_margin - (get4() & 7); if (raw_width == 7262 && ima_len == 234317952 ) { height = 5412; width = 7216; left_margin = 7; filters=0; } else if (raw_width == 7262) { height = 5444; width = 7244; left_margin = 7; } fseek (ifp, 52, SEEK_CUR); FORC3 cam_mul[c] = getreal(11); fseek (ifp, 114, SEEK_CUR); flip = (get2() >> 7) * 90; if (width * height * 6 == ima_len) { if (flip % 180 == 90) SWAP(width,height); raw_width = width; raw_height = height; left_margin = top_margin = filters = flip = 0; } sprintf (model, "Ixpress %d-Mp", height*width/1000000); load_raw = &CLASS imacon_full_load_raw; if (filters) { if (left_margin & 1) filters = 0x61616161; load_raw = &CLASS unpacked_load_raw; } maximum = 0xffff; break; case 50454: /* Sinar tag */ case 50455: if (!(cbuf = (char *) malloc(len))) break; fread (cbuf, 1, len, ifp); for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n')) if (!strncmp (++cp,"Neutral ",8)) sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2); free (cbuf); break; case 50458: if (!make[0]) strcpy (make, "Hasselblad"); break; case 50459: /* Hasselblad tag */ #ifdef LIBRAW_LIBRARY_BUILD 
libraw_internal_data.unpacker_data.hasselblad_parser_flag=1; #endif i = order; j = ftell(ifp); c = tiff_nifds; order = get2(); fseek (ifp, j+(get2(),get4()), SEEK_SET); parse_tiff_ifd (j); maximum = 0xffff; tiff_nifds = c; order = i; break; case 50706: /* DNGVersion */ FORC4 dng_version = (dng_version << 8) + fgetc(ifp); if (!make[0]) strcpy (make, "DNG"); is_raw = 1; break; case 50710: /* CFAPlaneColor */ if (filters == 9) break; if (len > 4) len = 4; colors = len; fread (cfa_pc, 1, colors, ifp); guess_cfa_pc: FORCC tab[cfa_pc[c]] = c; cdesc[c] = 0; for (i=16; i--; ) filters = filters << 2 | tab[cfa_pat[i % plen]]; filters -= !filters; break; case 50711: /* CFALayout */ if (get2() == 2) { fuji_width = 1; filters = 0x49494949; } break; case 291: case 50712: /* LinearizationTable */ linear_table (len); break; case 50713: /* BlackLevelRepeatDim */ cblack[4] = get2(); cblack[5] = get2(); if (cblack[4] * cblack[5] > (sizeof(cblack) / sizeof (cblack[0]) - 6)) cblack[4] = cblack[5] = 1; break; case 61450: cblack[4] = cblack[5] = MIN(sqrt((double)len),64); case 50714: /* BlackLevel */ if((cblack[4] * cblack[5] < 2) && len == 1) { black = getreal(type); } else if(cblack[4] * cblack[5] <= len) { FORC (cblack[4] * cblack[5]) cblack[6+c] = getreal(type); black = 0; } break; case 50715: /* BlackLevelDeltaH */ case 50716: /* BlackLevelDeltaV */ for (num=i=0; i < len && i < 65536; i++) num += getreal(type); black += num/len + 0.5; break; case 50717: /* WhiteLevel */ maximum = getint(type); break; case 50718: /* DefaultScale */ pixel_aspect = getreal(type); pixel_aspect /= getreal(type); if(pixel_aspect > 0.995 && pixel_aspect < 1.005) pixel_aspect = 1.0; break; #ifdef LIBRAW_LIBRARY_BUILD case 50778: imgdata.color.dng_color[0].illuminant = get2(); break; case 50779: imgdata.color.dng_color[1].illuminant = get2(); break; #endif case 50721: /* ColorMatrix1 */ case 50722: /* ColorMatrix2 */ #ifdef LIBRAW_LIBRARY_BUILD i = tag == 50721?0:1; #endif FORCC for (j=0; j < 3; j++) { 
#ifdef LIBRAW_LIBRARY_BUILD imgdata.color.dng_color[i].colormatrix[c][j]= #endif cm[c][j] = getreal(type); } use_cm = 1; break; case 50723: /* CameraCalibration1 */ case 50724: /* CameraCalibration2 */ #ifdef LIBRAW_LIBRARY_BUILD j = tag == 50723?0:1; #endif for (i=0; i < colors; i++) FORCC { #ifdef LIBRAW_LIBRARY_BUILD imgdata.color.dng_color[j].calibration[i][c]= #endif cc[i][c] = getreal(type); } break; case 50727: /* AnalogBalance */ FORCC ab[c] = getreal(type); break; case 50728: /* AsShotNeutral */ FORCC asn[c] = getreal(type); break; case 50729: /* AsShotWhiteXY */ xyz[0] = getreal(type); xyz[1] = getreal(type); xyz[2] = 1 - xyz[0] - xyz[1]; FORC3 xyz[c] /= d65_white[c]; break; #ifdef LIBRAW_LIBRARY_BUILD case 50730: /* DNG: Baseline Exposure */ baseline_exposure = getreal(type); break; #endif // IB start case 50740: /* tag 0xc634 : DNG Adobe, DNG Pentax, Sony SR2, DNG Private */ { char mbuf[64]; unsigned short makernote_found = 0; unsigned curr_pos, start_pos = ftell(ifp); unsigned MakN_order, m_sorder = order; unsigned MakN_length; unsigned pos_in_original_raw; fread(mbuf, 1, 6, ifp); if (!strcmp(mbuf, "Adobe")) { order = 0x4d4d; // Adobe header is always in "MM" / big endian curr_pos = start_pos + 6; while (curr_pos + 8 - start_pos <= len) { fread(mbuf, 1, 4, ifp); curr_pos += 8; if (!strncmp(mbuf, "MakN", 4)) { makernote_found = 1; MakN_length = get4(); MakN_order = get2(); pos_in_original_raw = get4(); order = MakN_order; parse_makernote_0xc634(curr_pos + 6 - pos_in_original_raw, 0, AdobeDNG); break; } } } else { fread(mbuf + 6, 1, 2, ifp); if (!strcmp(mbuf, "PENTAX ") || !strcmp(mbuf, "SAMSUNG")) { makernote_found = 1; fseek(ifp, start_pos, SEEK_SET); parse_makernote_0xc634(base, 0, CameraDNG); } } if (!makernote_found) fseek(ifp, start_pos, SEEK_SET); order = m_sorder; } // IB end if (dng_version) break; parse_minolta (j = get4()+base); fseek (ifp, j, SEEK_SET); parse_tiff_ifd (base); break; case 50752: read_shorts (cr2_slice, 3); break; case 50829: 
/* ActiveArea */ top_margin = getint(type); left_margin = getint(type); height = getint(type) - top_margin; width = getint(type) - left_margin; break; case 50830: /* MaskedAreas */ for (i=0; i < len && i < 32; i++) mask[0][i] = getint(type); black = 0; break; case 51009: /* OpcodeList2 */ meta_offset = ftell(ifp); break; case 64772: /* Kodak P-series */ if (len < 13) break; fseek (ifp, 16, SEEK_CUR); data_offset = get4(); fseek (ifp, 28, SEEK_CUR); data_offset += get4(); load_raw = &CLASS packed_load_raw; break; case 65026: if (type == 2) fgets (model2, 64, ifp); } fseek (ifp, save, SEEK_SET); } if (sony_length && (buf = (unsigned *) malloc(sony_length))) { fseek (ifp, sony_offset, SEEK_SET); fread (buf, sony_length, 1, ifp); sony_decrypt (buf, sony_length/4, 1, sony_key); #ifndef LIBRAW_LIBRARY_BUILD sfp = ifp; if ((ifp = tmpfile())) { fwrite (buf, sony_length, 1, ifp); fseek (ifp, 0, SEEK_SET); parse_tiff_ifd (-sony_offset); fclose (ifp); } ifp = sfp; #else if( !ifp->tempbuffer_open(buf,sony_length)) { parse_tiff_ifd(-sony_offset); ifp->tempbuffer_close(); } #endif free (buf); } for (i=0; i < colors; i++) FORCC cc[i][c] *= ab[i]; if (use_cm) { FORCC for (i=0; i < 3; i++) for (cam_xyz[c][i]=j=0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i]; cam_xyz_coeff (cmatrix, cam_xyz); } if (asn[0]) { cam_mul[3] = 0; FORCC cam_mul[c] = 1 / asn[c]; } if (!use_cm) FORCC pre_mul[c] /= cc[c][c]; return 0; } int CLASS parse_tiff (int base) { int doff; fseek (ifp, base, SEEK_SET); order = get2(); if (order != 0x4949 && order != 0x4d4d) return 0; get2(); while ((doff = get4())) { fseek (ifp, doff+base, SEEK_SET); if (parse_tiff_ifd (base)) break; } return 1; } void CLASS apply_tiff() { int max_samp=0, raw=-1, thm=-1, i; struct jhead jh; thumb_misc = 16; if (thumb_offset) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000) { thumb_misc = jh.bits; thumb_width = 
jh.wide; thumb_height = jh.high; } } } for (i=0; i < tiff_nifds; i++) { if (max_samp < tiff_ifd[i].samples) max_samp = tiff_ifd[i].samples; if (max_samp > 3) max_samp = 3; if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) && unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 && (unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 && tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) { raw_width = tiff_ifd[i].t_width; raw_height = tiff_ifd[i].t_height; tiff_bps = tiff_ifd[i].bps; tiff_compress = tiff_ifd[i].comp; data_offset = tiff_ifd[i].offset; tiff_flip = tiff_ifd[i].t_flip; tiff_samples = tiff_ifd[i].samples; tile_width = tiff_ifd[i].t_tile_width; tile_length = tiff_ifd[i].t_tile_length; #ifdef LIBRAW_LIBRARY_BUILD data_size = tile_length < INT_MAX && tile_length>0 ? tiff_ifd[i].tile_maxbytes: tiff_ifd[i].bytes; #endif raw = i; } } if (!tile_width ) tile_width = INT_MAX; if (!tile_length) tile_length = INT_MAX; for (i=tiff_nifds; i--; ) if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip; if (raw >= 0 && !load_raw) switch (tiff_compress) { case 32767: if (tiff_ifd[raw].bytes == raw_width*raw_height) { tiff_bps = 12; load_raw = &CLASS sony_arw2_load_raw; break; } if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) { raw_height += 8; load_raw = &CLASS sony_arw_load_raw; break; } load_flags = 79; case 32769: load_flags++; case 32770: case 32773: goto slr; case 0: case 1: #ifdef LIBRAW_LIBRARY_BUILD if(!strcasecmp(make,"Nikon") && !strncmp(software,"Nikon Scan",10)) { load_raw = &CLASS nikon_coolscan_load_raw; raw_color = 1; filters = 0; break; } #endif if (!strncmp(make,"OLYMPUS",7) && tiff_ifd[raw].bytes*2 == raw_width*raw_height*3) load_flags = 24; if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) { load_flags = 81; tiff_bps = 12; } slr: switch (tiff_bps) { case 8: load_raw = &CLASS eight_bit_load_raw; break; case 12: if (tiff_ifd[raw].phint == 2) load_flags = 6; load_raw = &CLASS packed_load_raw; 
break; case 14: load_flags = 0; case 16: load_raw = &CLASS unpacked_load_raw; if (!strncmp(make,"OLYMPUS",7) && tiff_ifd[raw].bytes*7 > raw_width*raw_height) load_raw = &CLASS olympus_load_raw; } break; case 6: case 7: case 99: load_raw = &CLASS lossless_jpeg_load_raw; break; case 262: load_raw = &CLASS kodak_262_load_raw; break; case 34713: if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) { load_raw = &CLASS packed_load_raw; load_flags = 1; } else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) { load_raw = &CLASS packed_load_raw; if (model[0] == 'N') load_flags = 80; } else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes) { load_raw = &CLASS nikon_yuv_load_raw; gamma_curve (1/2.4, 12.92, 1, 4095); memset (cblack, 0, sizeof cblack); filters = 0; } else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) { load_raw = &CLASS unpacked_load_raw; load_flags = 4; order = 0x4d4d; } else #ifdef LIBRAW_LIBRARY_BUILD if(raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) { load_raw = &CLASS packed_load_raw; load_flags=80; } else #endif load_raw = &CLASS nikon_load_raw; break; case 65535: load_raw = &CLASS pentax_load_raw; break; case 65000: switch (tiff_ifd[raw].phint) { case 2: load_raw = &CLASS kodak_rgb_load_raw; filters = 0; break; case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0; break; case 32803: load_raw = &CLASS kodak_65000_load_raw; } case 32867: case 34892: break; default: is_raw = 0; } if (!dng_version) if ( ((tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 && (tiff_compress & -16) != 32768) || (tiff_bps == 8 && !strcasestr(make,"Kodak") && !strstr(model2,"DEBUG RAW"))) && strncmp(software,"Nikon Scan",10)) is_raw = 0; for (i=0; i < tiff_nifds; i++) if (i != raw && tiff_ifd[i].samples == max_samp && tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33 && unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 && tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) > thumb_width * thumb_height / (SQR(thumb_misc)+1) 
&& tiff_ifd[i].comp != 34892) { thumb_width = tiff_ifd[i].t_width; thumb_height = tiff_ifd[i].t_height; thumb_offset = tiff_ifd[i].offset; thumb_length = tiff_ifd[i].bytes; thumb_misc = tiff_ifd[i].bps; thm = i; } if (thm >= 0) { thumb_misc |= tiff_ifd[thm].samples << 5; switch (tiff_ifd[thm].comp) { case 0: write_thumb = &CLASS layer_thumb; break; case 1: if (tiff_ifd[thm].bps <= 8) write_thumb = &CLASS ppm_thumb; else if (!strcmp(make,"Imacon")) write_thumb = &CLASS ppm16_thumb; else thumb_load_raw = &CLASS kodak_thumb_load_raw; break; case 65000: thumb_load_raw = tiff_ifd[thm].phint == 6 ? &CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw; } } } void CLASS parse_minolta (int base) { int save, tag, len, offset, high=0, wide=0, i, c; short sorder=order; fseek (ifp, base, SEEK_SET); if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return; order = fgetc(ifp) * 0x101; offset = base + get4() + 8; while ((save=ftell(ifp)) < offset) { for (tag=i=0; i < 4; i++) tag = tag << 8 | fgetc(ifp); len = get4(); switch (tag) { case 0x505244: /* PRD */ fseek (ifp, 8, SEEK_CUR); high = get2(); wide = get2(); break; case 0x574247: /* WBG */ get4(); i = strcmp(model,"DiMAGE A200") ? 0:3; FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2(); break; case 0x545457: /* TTW */ parse_tiff (ftell(ifp)); data_offset = offset; } fseek (ifp, save+len+8, SEEK_SET); } raw_height = high; raw_width = wide; order = sorder; } /* Many cameras have a "debug mode" that writes JPEG and raw at the same time. The raw file has no header, so try to to open the matching JPEG file and read its metadata. 
*/
/*
   parse_external_jpeg():
   Derive the name of the companion JPEG from the raw file's name, open it,
   and parse its TIFF/EXIF metadata (parse_tiff(12)); on success the JPEG's
   thumbnail offset is discarded (thumb_offset = 0) and is_raw is forced to 1.
   No value is returned; results land in the usual globals / imgdata state.
 */
void CLASS parse_external_jpeg()
{
  const char *file, *ext;
  char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
  /* dcraw build: metadata is read through the global ifp, so save it here
     and restore it at the end of the function */
  FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
  /* Windows/MSVC build with a wide-character filename: replace the last three
     characters of the name with "JPG" and try that file directly */
  if(ifp->wfname())
  {
    std::wstring rawfile(ifp->wfname());
    rawfile.replace(rawfile.length()-3,3,L"JPG");
    if(!ifp->subfile_open(rawfile.c_str()))
    {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    }
    else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
    return;
  }
#endif
  /* No narrow filename available (e.g. buffer-backed stream): nothing to open */
  if(!ifp->fname())
  {
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
    return;
  }
#endif
  /* Split ifname into directory part ("file" points past the last separator)
     and extension ("ext" points at the final dot) */
  ext = strrchr (ifname, '.');
  file = strrchr (ifname, '/');
  if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
  if (!file) file = ifname-1;
#else
  if (!file) file = (char*)ifname-1;
#endif
  file++;
  /* Only 8.3-style names are handled: 8-char base plus a 4-char ".ext" */
  if (!ext || strlen(ext) != 4 || ext-file != 8) return;
  jname = (char *) malloc (strlen(ifname) + 1);
  merror (jname, "parse_external_jpeg()");
  strcpy (jname, ifname);
  /* jfile/jext point into the jname copy at the same offsets as file/ext */
  jfile = file - ifname + jname;
  jext = ext - ifname + jname;
  if (strcasecmp (ext, ".jpg")) {
    /* Raw extension is not ".jpg": swap the extension for .JPG/.jpg
       (matching the case of the original extension's first letter) */
    strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
    if (isdigit(*file)) {
      /* Base name starts with a digit: swap the two 4-character halves
         of the 8-character base (e.g. "1234ABCD" -> "ABCD1234") —
         presumably a camera-specific naming scheme; TODO confirm which models */
      memcpy (jfile, file+4, 4);
      memcpy (jfile+4, file, 4);
    }
  } else
    /* Raw file itself ends in ".jpg": increment the trailing run of digits
       with carry ('9' -> '0', carry into the next digit to the left) —
       NOTE(review): looks like the metadata JPEG is assumed to be the next
       frame number in sequence; verify against the cameras that hit this path */
    while (isdigit(*--jext)) {
      if (*jext != '9') {
        (*jext)++;
	break;
      }
      *jext = '0';
    }
#ifndef LIBRAW_LIBRARY_BUILD
  if (strcmp (jname, ifname)) {
    if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
      if (verbose)
	fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      fclose (ifp);
    }
  }
#else
  /* LibRaw build: open the sibling JPEG as a sub-file on the same stream
     object instead of swapping the global FILE pointer */
  if (strcmp (jname, ifname)) {
    if(!ifp->subfile_open(jname))
    {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    }
    else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
  }
#endif
  /* timestamp still unset => the JPEG's metadata was not usable */
  if (!timestamp) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
  }
  free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
  ifp = save;
#endif
}

/*
   CIFF block 0x1030 contains an 8x8 white sample.
   Load this into white[][] for use in scale_colors().
 */
void CLASS ciff_block_1030()
{
  /* XOR keys applied alternately to each 16-bit word read from the file */
  static const ushort key[] = { 0x410, 0x45f3 };
  int i, bpp, row, col, vbits=0;
  unsigned long bitbuf=0;
  /* Header must read 0x80008 (8x8 sample) and the next word must be nonzero */
  if ((get2(),get4()) != 0x80008 || !get4()) return;
  bpp = get2();
  /* Only 10- and 12-bit samples are supported */
  if (bpp != 10 && bpp != 12) return;
  for (i=row=0; row < 8; row++)
    for (col=0; col < 8; col++) {
      /* Refill the bit accumulator 16 bits at a time, de-obfuscating each
         word with the alternating key */
      if (vbits < bpp) {
	bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
	vbits += 16;
      }
      /* Extract the top bpp bits currently in the accumulator
         (LONG_BIT is the width of unsigned long bitbuf) */
      white[row][col] = bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp);
      vbits -= bpp;
    }
}

/*
   Parse a CIFF file, better known as Canon CRW format.
*/ void CLASS parse_ciff (int offset, int length, int depth) { int tboff, nrecs, c, type, len, save, wbi=-1; ushort key[] = { 0x410, 0x45f3 }; fseek (ifp, offset+length-4, SEEK_SET); tboff = get4() + offset; fseek (ifp, tboff, SEEK_SET); nrecs = get2(); if ((nrecs | depth) > 127) return; while (nrecs--) { type = get2(); len = get4(); // printf ("\n*** type: 0x%04x len: 0x%04x", type, len); save = ftell(ifp) + 4; fseek (ifp, offset+get4(), SEEK_SET); if ((((type >> 8) + 8) | 8) == 0x38) { parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */ } if (type == 0x0810) fread (artist, 64, 1, ifp); if (type == 0x080a) { fread (make, 64, 1, ifp); fseek (ifp, strlen(make) - 63, SEEK_CUR); fread (model, 64, 1, ifp); } if (type == 0x1810) { width = get4(); height = get4(); pixel_aspect = int_to_float(get4()); flip = get4(); } if (type == 0x1835) /* Get the decoder table */ tiff_compress = get4(); if (type == 0x2007) { thumb_offset = ftell(ifp); thumb_length = len; } if (type == 0x1818) { shutter = powf64(2.0f, -int_to_float((get4(),get4()))); aperture = powf64(2.0f, int_to_float(get4())/2); #ifdef LIBRAW_LIBRARY_BUILD imgdata.lens.makernotes.CurAp = aperture; #endif } if (type == 0x102a) { // iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50; iso_speed = powf64(2.0f, ((get2(),get2()) + get2())/32.0f - 5.0f) * 100.0f; #ifdef LIBRAW_LIBRARY_BUILD aperture = _CanonConvertAperture((get2(),get2())); imgdata.lens.makernotes.CurAp = aperture; #else aperture = powf64(2.0, (get2(),(short)get2())/64.0); #endif shutter = powf64(2.0,-((short)get2())/32.0); wbi = (get2(),get2()); if (wbi > 17) wbi = 0; fseek (ifp, 32, SEEK_CUR); if (shutter > 1e6) shutter = get2()/10.0; } if (type == 0x102c) { if (get2() > 512) { /* Pro90, G1 */ fseek (ifp, 118, SEEK_CUR); FORC4 cam_mul[c ^ 2] = get2(); } else { /* G2, S30, S40 */ fseek (ifp, 98, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2(); } } #ifdef LIBRAW_LIBRARY_BUILD if (type == 0x102d) { fseek(ifp, 44, SEEK_CUR); 
imgdata.lens.makernotes.LensID = get2(); imgdata.lens.makernotes.MaxFocal = get2(); imgdata.lens.makernotes.MinFocal = get2(); imgdata.lens.makernotes.CanonFocalUnits = get2(); if (imgdata.lens.makernotes.CanonFocalUnits != 1) { imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; } imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2()); imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2()); } #endif if (type == 0x0032) { if (len == 768) { /* EOS D30 */ fseek (ifp, 72, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2(); if (!wbi) cam_mul[0] = -1; /* use my auto white balance */ } else if (!cam_mul[0]) { if (get2() == key[0]) /* Pro1, G6, S60, S70 */ c = (strstr(model,"Pro1") ? "012346000000000000":"01345:000000006008")[wbi]-'0'+ 2; else { /* G3, G5, S45, S50 */ c = "023457000000006000"[wbi]-'0'; key[0] = key[1] = 0; } fseek (ifp, 78 + c*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1]; if (!wbi) cam_mul[0] = -1; } } if (type == 0x10a9) { /* D60, 10D, 300D, and clones */ if (len > 66) wbi = "0134567028"[wbi]-'0'; fseek (ifp, 2 + wbi*8, SEEK_CUR); FORC4 cam_mul[c ^ (c >> 1)] = get2(); } if (type == 0x1030 && (0x18040 >> wbi & 1)) ciff_block_1030(); /* all that don't have 0x10a9 */ if (type == 0x1031) { raw_width = (get2(),get2()); raw_height = get2(); } if (type == 0x501c) { iso_speed = len & 0xffff; } if (type == 0x5029) { #ifdef LIBRAW_LIBRARY_BUILD imgdata.lens.makernotes.CurFocal = len >> 16; imgdata.lens.makernotes.FocalType = len & 0xffff; if (imgdata.lens.makernotes.FocalType == 2) { imgdata.lens.makernotes.CanonFocalUnits = 32; imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits; } focal_len = imgdata.lens.makernotes.CurFocal; #else focal_len = len >> 16; if ((len & 0xffff) == 2) focal_len /= 32; #endif } if (type == 0x5813) flash_used = int_to_float(len); if (type == 0x5814) 
canon_ev = int_to_float(len); if (type == 0x5817) shot_order = len; if (type == 0x5834) { unique_id = len; #ifdef LIBRAW_LIBRARY_BUILD setCanonBodyFeatures(unique_id); #endif } if (type == 0x580e) timestamp = len; if (type == 0x180e) timestamp = get4(); #ifdef LOCALTIME if ((type | 0x4000) == 0x580e) timestamp = mktime (gmtime (&timestamp)); #endif fseek (ifp, save, SEEK_SET); } } void CLASS parse_rollei() { char line[128], *val; struct tm t; fseek (ifp, 0, SEEK_SET); memset (&t, 0, sizeof t); do { fgets (line, 128, ifp); if ((val = strchr(line,'='))) *val++ = 0; else val = line + strlen(line); if (!strcmp(line,"DAT")) sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year); if (!strcmp(line,"TIM")) sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec); if (!strcmp(line,"HDR")) thumb_offset = atoi(val); if (!strcmp(line,"X ")) raw_width = atoi(val); if (!strcmp(line,"Y ")) raw_height = atoi(val); if (!strcmp(line,"TX ")) thumb_width = atoi(val); if (!strcmp(line,"TY ")) thumb_height = atoi(val); } while (strncmp(line,"EOHD",4)); data_offset = thumb_offset + thumb_width * thumb_height * 2; t.tm_year -= 1900; t.tm_mon -= 1; if (mktime(&t) > 0) timestamp = mktime(&t); strcpy (make, "Rollei"); strcpy (model,"d530flex"); write_thumb = &CLASS rollei_thumb; } void CLASS parse_sinar_ia() { int entries, off; char str[8], *cp; order = 0x4949; fseek (ifp, 4, SEEK_SET); entries = get4(); fseek (ifp, get4(), SEEK_SET); while (entries--) { off = get4(); get4(); fread (str, 8, 1, ifp); if (!strcmp(str,"META")) meta_offset = off; if (!strcmp(str,"THUMB")) thumb_offset = off; if (!strcmp(str,"RAW0")) data_offset = off; } fseek (ifp, meta_offset+20, SEEK_SET); fread (make, 64, 1, ifp); make[63] = 0; if ((cp = strchr(make,' '))) { strcpy (model, cp+1); *cp = 0; } raw_width = get2(); raw_height = get2(); load_raw = &CLASS unpacked_load_raw; thumb_width = (get4(),get2()); thumb_height = get2(); write_thumb = &CLASS ppm_thumb; maximum = 0x3fff; } void CLASS parse_phase_one (int 
base) { unsigned entries, tag, type, len, data, save, i, c; float romm_cam[3][3]; char *cp; #ifdef LIBRAW_LIBRARY_BUILD char body_id[3]; body_id[0] = 0; #endif memset (&ph1, 0, sizeof ph1); fseek (ifp, base, SEEK_SET); order = get4() & 0xffff; if (get4() >> 8 != 0x526177) return; /* "Raw" */ fseek (ifp, get4()+base, SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); type = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, base+data, SEEK_SET); switch (tag) { #ifdef LIBRAW_LIBRARY_BUILD case 0x0102: fread(body_id, 1, 3, ifp); if ((body_id[0] == 0x4c) && (body_id[1] == 0x49)) { body_id[1] = body_id[2]; } unique_id = (((body_id[0] & 0x3f) << 5) | (body_id[1] & 0x3f)) - 0x41; setPhaseOneFeatures(unique_id); break; case 0x0401: if (type == 4) imgdata.lens.makernotes.CurAp = powf64(2.0f, (int_to_float(data)/2.0f)); else imgdata.lens.makernotes.CurAp = powf64(2.0f, (getreal(type)/2.0f)); break; case 0x0403: if (type == 4) imgdata.lens.makernotes.CurFocal = int_to_float(data); else imgdata.lens.makernotes.CurFocal = getreal(type); break; case 0x0410: fread(imgdata.lens.makernotes.body, 1, len, ifp); break; case 0x0412: fread(imgdata.lens.makernotes.Lens, 1, len, ifp); break; case 0x0414: if (type == 4) { imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (int_to_float(data)/2.0f)); } else { imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (getreal(type) / 2.0f)); } break; case 0x0415: if (type == 4) { imgdata.lens.makernotes.MinAp4CurFocal = powf64(2.0f, (int_to_float(data)/2.0f)); } else { imgdata.lens.makernotes.MinAp4CurFocal = powf64(2.0f, (getreal(type) / 2.0f)); } break; case 0x0416: if (type == 4) { imgdata.lens.makernotes.MinFocal = int_to_float(data); } else { imgdata.lens.makernotes.MinFocal = getreal(type); } if (imgdata.lens.makernotes.MinFocal > 1000.0f) { imgdata.lens.makernotes.MinFocal = 0.0f; } break; case 0x0417: if (type == 4) { imgdata.lens.makernotes.MaxFocal = int_to_float(data); } else { 
imgdata.lens.makernotes.MaxFocal = getreal(type); } break; #endif case 0x100: flip = "0653"[data & 3]-'0'; break; case 0x106: for (i=0; i < 9; i++) romm_cam[0][i] = getreal(11); romm_coeff (romm_cam); break; case 0x107: FORC3 cam_mul[c] = getreal(11); break; case 0x108: raw_width = data; break; case 0x109: raw_height = data; break; case 0x10a: left_margin = data; break; case 0x10b: top_margin = data; break; case 0x10c: width = data; break; case 0x10d: height = data; break; case 0x10e: ph1.format = data; break; case 0x10f: data_offset = data+base; break; case 0x110: meta_offset = data+base; meta_length = len; break; case 0x112: ph1.key_off = save - 4; break; case 0x210: ph1.tag_210 = int_to_float(data); break; case 0x21a: ph1.tag_21a = data; break; case 0x21c: strip_offset = data+base; break; case 0x21d: ph1.t_black = data; break; case 0x222: ph1.split_col = data; break; case 0x223: ph1.black_col = data+base; break; case 0x224: ph1.split_row = data; break; case 0x225: ph1.black_row = data+base; break; case 0x301: model[63] = 0; fread (model, 1, 63, ifp); if ((cp = strstr(model," camera"))) *cp = 0; } fseek (ifp, save, SEEK_SET); } #ifdef LIBRAW_LIBRARY_BUILD if (!imgdata.lens.makernotes.body[0] && !body_id[0]) { fseek (ifp, meta_offset, SEEK_SET); order = get2(); fseek (ifp, 6, SEEK_CUR); fseek (ifp, meta_offset+get4(), SEEK_SET); entries = get4(); get4(); while (entries--) { tag = get4(); len = get4(); data = get4(); save = ftell(ifp); fseek (ifp, meta_offset+data, SEEK_SET); if (tag == 0x0407) { fread(body_id, 1, 3, ifp); if ((body_id[0] == 0x4c) && (body_id[1] == 0x49)) { body_id[1] = body_id[2]; } unique_id = (((body_id[0] & 0x3f) << 5) | (body_id[1] & 0x3f)) - 0x41; setPhaseOneFeatures(unique_id); } fseek (ifp, save, SEEK_SET); } } #endif load_raw = ph1.format < 3 ? 
&CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
  maximum = 0xffff;
  strcpy (make, "Phase One");
  if (model[0]) return;
  /* No model name found in the metadata: infer it from the sensor height. */
  switch (raw_height) {
    case 2060: strcpy (model,"LightPhase"); break;
    case 2682: strcpy (model,"H 10"); break;
    case 4128: strcpy (model,"H 20"); break;
    case 5488: strcpy (model,"H 25"); break;
  }
}

/*
   Walk the Fujifilm RAF tag directory starting at 'offset' and pick up
   the raw/output dimensions, layout flags, the 6x6 X-Trans CFA pattern
   and the as-shot white balance.
 */
void CLASS parse_fuji (int offset)
{
  unsigned entries, tag, len, save, c;

  fseek (ifp, offset, SEEK_SET);
  entries = get4();
  if (entries > 255) return;		/* implausible entry count: bail out */
  while (entries--) {
    tag = get2();
    len = get2();
    save = ftell(ifp);			/* start of this tag's payload */
    if (tag == 0x100) {			/* raw frame dimensions */
      raw_height = get2();
      raw_width  = get2();
    } else if (tag == 0x121) {		/* output dimensions */
      height = get2();
      if ((width = get2()) == 4284) width += 3;
    } else if (tag == 0x130) {		/* sensor layout flags */
      fuji_layout = fgetc(ifp) >> 7;
      fuji_width = !(fgetc(ifp) & 8);
    } else if (tag == 0x131) {		/* 6x6 X-Trans CFA pattern, stored reversed */
      filters = 9;
      FORC(36) xtrans_abs[0][35-c] = fgetc(ifp) & 3;
    } else if (tag == 0x2ff0) {		/* as-shot white balance multipliers */
      FORC4 cam_mul[c ^ 1] = get2();
    } else if (tag == 0xc000) {		/* little-endian sub-block with dimensions */
      c = order;
      order = 0x4949;
      if ((tag = get4()) > 10000) tag = get4();
      width = tag;
      height = get4();
      order = c;			/* restore previous byte order */
    }
    fseek (ifp, save+len, SEEK_SET);	/* advance to the next tag regardless */
  }
  height <<= fuji_layout;
  width  >>= fuji_layout;
}

/*
   Scan a JPEG stream at 'offset'.  Returns 0 unless it starts with an
   SOI marker (0xff 0xd8).  SOF0/SOF3 frame headers supply the raw
   dimensions; a "HEAP" block is handed to parse_ciff() and any TIFF
   structure after the marker header to parse_tiff().
 */
int CLASS parse_jpeg (int offset)
{
  int len, save, hlen, mark;

  fseek (ifp, offset, SEEK_SET);
  if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;	/* not a JPEG SOI */

  while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {	/* stop at SOS */
    order = 0x4d4d;		/* JPEG marker segments are big-endian */
    len   = get2() - 2;
    save  = ftell(ifp);
    if (mark == 0xc0 || mark == 0xc3) {		/* SOF0 / SOF3: frame size */
      fgetc(ifp);				/* skip sample precision byte */
      raw_height = get2();
      raw_width  = get2();
    }
    order = get2();		/* segment may embed TIFF data: read its byte order */
    hlen  = get4();
    if (get4() == 0x48454150)	/* "HEAP" */
      {
#ifdef LIBRAW_LIBRARY_BUILD
	imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
	imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
	parse_ciff (save+hlen, len-hlen, 0);
      }
    if (parse_tiff (save+6)) apply_tiff();
    fseek (ifp, save+len, SEEK_SET);
  }
  return 1;
}

/*
   Parse a RIFF/AVI container (recursing into RIFF/LIST chunks) to
   extract a capture timestamp from "nctg" or "IDIT" chunks.
 */
void CLASS parse_riff()
{
  unsigned i, size, end;
  char tag[4], date[64], month[64];
  static const char mon[12][4] =
  { "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
  struct tm t;

  order = 0x4949;		/* RIFF data is little-endian */
  fread (tag, 4, 1, ifp);
  size = get4();
  end = ftell(ifp) + size;
  if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
    int maxloop = 1000;		/* guard against malformed/looping chunk lists */
    get4();
    while (ftell(ifp)+7 < end && !feof(ifp) && maxloop--)
      parse_riff();
  } else if (!memcmp(tag,"nctg",4)) {	/* Nikon tag list */
    while (ftell(ifp)+7 < end) {
      i = get2();
      size = get2();
      if ((i+1) >> 1 == 10 && size == 20)	/* tags 0x13/0x14: 20-byte date */
	get_timestamp(0);
      else fseek (ifp, size, SEEK_CUR);
    }
  } else if (!memcmp(tag,"IDIT",4) && size < 64) {	/* ctime-style date string */
    fread (date, 64, 1, ifp);
    date[size] = 0;
    memset (&t, 0, sizeof t);
    if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
	&t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
      for (i=0; i < 12 && strcasecmp(mon[i],month); i++);	/* month name -> index */
      t.tm_mon = i;
      t.tm_year -= 1900;
      if (mktime(&t) > 0)
	timestamp = mktime(&t);
    }
  } else
    fseek (ifp, size, SEEK_CUR);
}

/*
   Recursively scan QuickTime/MP4 atoms up to file position 'end'.
   Container atoms (moov/udta/CNTH) are descended into; a "CNDA" atom
   holds a JPEG that is handed to parse_jpeg().
 */
void CLASS parse_qt (int end)
{
  unsigned save, size;
  char tag[4];

  order = 0x4d4d;		/* QuickTime atoms are big-endian */
  while (ftell(ifp)+7 < end) {
    save = ftell(ifp);
    if ((size = get4()) < 8) return;	/* atom header is at least 8 bytes */
    fread (tag, 4, 1, ifp);
    if (!memcmp(tag,"moov",4) ||
	!memcmp(tag,"udta",4) ||
	!memcmp(tag,"CNTH",4))
      parse_qt (save+size);
    if (!memcmp(tag,"CNDA",4))
      parse_jpeg (ftell(ifp));
    fseek (ifp, save+size, SEEK_SET);
  }
}

/*
   Identify a SMaL Ultra-Pocket file: validate the recorded file size
   against 'fsize', read the dimensions and select the loader that
   matches the format version.
 */
void CLASS parse_smal (int offset, int fsize)
{
  int ver;

  fseek (ifp, offset+2, SEEK_SET);
  order = 0x4949;
  ver = fgetc(ifp);
  if (ver == 6)
    fseek (ifp, 5, SEEK_CUR);
  if (get4() != fsize) return;		/* stored size must match actual size */
  if (ver > 6) data_offset = get4();
  raw_height = height = get2();
  raw_width  = width  = get2();
  strcpy (make, "SMaL");
  sprintf (model, "v%d %dx%d", ver, width, height);
  if (ver == 6) load_raw = &CLASS smal_v6_load_raw;
  if (ver == 9) load_raw = &CLASS smal_v9_load_raw;
}

/*
   Parse a Vision Research Phantom CINE movie header: frame size, bit
   depth, CFA pattern, orientation, white balance and the data offset
   of the frame chosen by shot_select.
 */
void CLASS parse_cine()
{
  unsigned off_head, off_setup, off_image, i;

  order = 0x4949;
  fseek (ifp, 4, SEEK_SET);
  is_raw = get2() == 2;
  fseek (ifp, 14, SEEK_CUR);
  is_raw *= get4();		/* scale by the frame count */
  off_head  = get4();
  off_setup = get4();
  off_image = get4();
  timestamp = get4();
  if ((i = get4())) timestamp = i;
  fseek (ifp, off_head+4, SEEK_SET);
  raw_width  = get4();
  raw_height = get4();
  switch (get2(),get2()) {	/* first get2() is discarded via comma operator */
    case  8: load_raw = &CLASS eight_bit_load_raw; break;
    case 16: load_raw = &CLASS unpacked_load_raw;
  }
  fseek (ifp, off_setup+792, SEEK_SET);
  strcpy (make, "CINE");
  sprintf (model, "%d", get4());
  fseek (ifp, 12, SEEK_CUR);
  switch ((i=get4()) & 0xffffff) {	/* CFA code */
    case 3: filters = 0x94949494; break;
    case 4: filters = 0x49494949; break;
    default: is_raw = 0;
  }
  fseek (ifp, 72, SEEK_CUR);
  switch ((get4()+3600) % 360) {	/* rotation angle -> flip code */
    case 270: flip = 4; break;
    case 180: flip = 1; break;
    case  90: flip = 7; break;
    case   0: flip = 2;
  }
  cam_mul[0] = getreal(11);
  cam_mul[2] = getreal(11);
  maximum = ~((~0u) << get4());		/* saturation level from bits per pixel */
  fseek (ifp, 668, SEEK_CUR);
  shutter = get4()/1000000000.0;	/* exposure stored in nanoseconds */
  fseek (ifp, off_image, SEEK_SET);
  if (shot_select < is_raw)
    fseek (ifp, shot_select*8, SEEK_CUR);
  data_offset  = (INT64) get4() + 8;
  data_offset += (INT64) get4() << 32;	/* 64-bit offset read in two halves */
}

/*
   Parse a RED REDCODE (R3D) file.  Normally the 512-byte-aligned tail
   block ("REOB") supplies the frame index; if the tail is missing, the
   file is scanned from the head for "REDV" video-frame records.
 */
void CLASS parse_redcine()
{
  unsigned i, len, rdvo;

  order = 0x4d4d;
  is_raw = 0;
  fseek (ifp, 52, SEEK_SET);
  width  = get4();
  height = get4();
  fseek (ifp, 0, SEEK_END);
  fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);	/* back to 512-byte boundary */
  if (get4() != i || get4() != 0x52454f42) {		/* "REOB" */
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
    fseek (ifp, 0, SEEK_SET);
    /* NOTE(review): 'len' is unsigned, so comparing against EOF (-1) only
       terminates on len == 0xffffffff, not on a real end-of-file — confirm
       whether truncated files can loop here. */
    while ((len = get4()) != EOF) {
      if (get4() == 0x52454456)				/* "REDV" frame record */
	if (is_raw++ == shot_select)
	  data_offset = ftello(ifp) - 8;
      fseek (ifp, len-8, SEEK_CUR);
    }
  } else {
    rdvo = get4();
    fseek (ifp, 12, SEEK_CUR);
    is_raw = get4();
    fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
    data_offset = get4();
  }
}
//@end COMMON

/*
   Read a 16-bit-per-character string (presumably UTF-16 — narrowed to
   char) stored at 'offset' into 'str', copying at most len-1
   characters and NUL-terminating.  Returns str.
 */
char * CLASS foveon_gets (int offset, char *str, int len)
{
  int i;
  fseek (ifp, offset, SEEK_SET);
  for (i=0; i < len-1; i++)
    if ((str[i] = get2()) == 0) break;
  str[i] = 0;
  return str;
}

/*
   Parse a Sigma/Foveon X3F file.  The section directory at the end of
   the file lists IMAG/IMA2 (image data and thumbnails), CAMF (camera
   metadata) and PROP (text property list) sections.
 */
void CLASS parse_foveon()
{
  int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
  char name[64], value[64];

  order = 0x4949;			/* Little-endian */
  fseek (ifp, 36, SEEK_SET);
  flip = get4();
  fseek (ifp, -4, SEEK_END);		/* directory offset is the last 4 bytes */
  fseek (ifp, get4(), SEEK_SET);
  if (get4() != 0x64434553) return;	/* SECd */
  entries = (get4(),get4());		/* skip version, read entry count */
  while (entries--) {
    off = get4();
    len = get4();
    tag = get4();
    save = ftell(ifp);
    fseek (ifp, off, SEEK_SET);
    if (get4() != (0x20434553 | (tag << 24))) return;	/* "SEC" + section letter */
    switch (tag) {
      case 0x47414d49:			/* IMAG */
      case 0x32414d49:			/* IMA2 */
	fseek (ifp, 8, SEEK_CUR);
	pent = get4();
	wide = get4();
	high = get4();
	if (wide > raw_width && high > raw_height) {	/* keep the largest image */
	  switch (pent) {
	    case  5:  load_flags = 1;	/* fall through */
	    case  6:  load_raw = &CLASS foveon_sd_load_raw;  break;
	    case 30:  load_raw = &CLASS foveon_dp_load_raw;  break;
	    default:  load_raw = 0;
	  }
	  raw_width  = wide;
	  raw_height = high;
	  data_offset = off+28;
	  is_foveon = 1;
	}
	fseek (ifp, off+28, SEEK_SET);
	if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8	/* embedded JPEG? */
	    && thumb_length < len-28) {
	  thumb_offset = off+28;
	  thumb_length = len-28;
	  write_thumb = &CLASS jpeg_thumb;
	}
	if (++img == 2 && !thumb_length) {	/* second image serves as thumbnail */
	  thumb_offset = off+24;
	  thumb_width = wide;
	  thumb_height = high;
	  write_thumb = &CLASS foveon_thumb;
	}
	break;
      case 0x464d4143:			/* CAMF */
	meta_offset = off+8;
	meta_length = len-28;
	break;
      case 0x504f5250:			/* PROP */
	pent = (get4(),get4());		/* skip version, read property count */
	fseek (ifp, 12, SEEK_CUR);
	off += pent*8 + 24;
	if ((unsigned) pent > 256) pent=256;	/* clamp to poff[] capacity */
	for (i=0; i < pent*2; i++)
	  poff[0][i] = off + get4()*2;
	for (i=0; i < pent; i++) {
	  foveon_gets (poff[i][0], name, 64);	/* property name */
	  foveon_gets (poff[i][1], value, 64);	/* property value (text) */
	  if (!strcmp (name, "ISO"))
	    iso_speed = atoi(value);
	  if (!strcmp (name, "CAMMANUF"))
	    strcpy (make, value);
	  if (!strcmp (name, "CAMMODEL"))
	    strcpy (model, value);
	  if (!strcmp (name, "WB_DESC"))
	    strcpy (model2, value);
	  if (!strcmp (name, "TIME"))
	    timestamp = atoi(value);
	  if (!strcmp (name, "EXPTIME"))
	    shutter = atoi(value) / 1000000.0;	/* value in microseconds */
	  if (!strcmp (name, "APERTURE"))
	    aperture = atof(value);
	  if (!strcmp (name, "FLENGTH"))
	    focal_len = atof(value);
#ifdef LIBRAW_LIBRARY_BUILD
	  if (!strcmp (name, "FLEQ35MM"))
	    imgdata.lens.makernotes.FocalLengthIn35mmFormat = atof(value);
	  if (!strcmp (name, "LENSARANGE"))	/* "max min" aperture pair */
	    {
	      char *sp;
	      imgdata.lens.makernotes.MaxAp4CurFocal =
		imgdata.lens.makernotes.MinAp4CurFocal = atof(value);
	      sp = strrchr (value, ' ');
	      if (sp)
		{
		  imgdata.lens.makernotes.MinAp4CurFocal = atof(sp);
		  if (imgdata.lens.makernotes.MaxAp4CurFocal >
		      imgdata.lens.makernotes.MinAp4CurFocal)
		    my_swap (float, imgdata.lens.makernotes.MaxAp4CurFocal,
			     imgdata.lens.makernotes.MinAp4CurFocal);
		}
	    }
	  if (!strcmp (name, "LENSFRANGE"))	/* "min max" focal range pair */
	    {
	      char *sp;
	      imgdata.lens.makernotes.MinFocal =
		imgdata.lens.makernotes.MaxFocal = atof(value);
	      sp = strrchr (value, ' ');
	      if (sp)
		{
		  imgdata.lens.makernotes.MaxFocal = atof(sp);
		  if ((imgdata.lens.makernotes.MaxFocal + 0.17f) <
		      imgdata.lens.makernotes.MinFocal)
		    my_swap (float, imgdata.lens.makernotes.MaxFocal,
			     imgdata.lens.makernotes.MinFocal);
		}
	    }
	  if (!strcmp (name, "LENSMODEL"))
	    {
	      imgdata.lens.makernotes.LensID = atoi(value);
	      if (imgdata.lens.makernotes.LensID)
		imgdata.lens.makernotes.LensMount = Sigma_X3F;
	    }
	}
#endif
	}
#ifdef LOCALTIME
      timestamp = mktime (gmtime (&timestamp));
#endif
    }
    fseek (ifp, save, SEEK_SET);	/* back to the directory entry list */
  }
}
//@out COMMON

/* All matrices are from Adobe DNG Converter unless otherwise noted.
*/ void CLASS adobe_coeff (const char *t_make, const char *t_model #ifdef LIBRAW_LIBRARY_BUILD ,int internal_only #endif ) { static const struct { const char *prefix; int t_black, t_maximum, trans[12]; } table[] = { { "AgfaPhoto DC-833m", 0, 0, /* DJC */ { 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } }, { "Apple QuickTake", 0, 0, /* DJC */ { 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } }, { "Canon EOS D2000", 0, 0, { 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } }, { "Canon EOS D6000", 0, 0, { 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } }, { "Canon EOS D30", 0, 0, { 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } }, { "Canon EOS D60", 0, 0xfa0, { 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } }, { "Canon EOS 5D Mark III", 0, 0x3c80, { 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } }, { "Canon EOS 5D Mark II", 0, 0x3cf0, { 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } }, { "Canon EOS 5D", 0, 0xe6c, { 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } }, { "Canon EOS 6D", 0, 0x3c82, { 8621,-2197,-787,-3150,11358,912,-1161,2400,4836 } }, { "Canon EOS 7D Mark II", 0, 0x3510, { 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } }, { "Canon EOS 7D", 0, 0x3510, { 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } }, { "Canon EOS 10D", 0, 0xfa0, { 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } }, { "Canon EOS 20Da", 0, 0, { 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } }, { "Canon EOS 20D", 0, 0xfff, { 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } }, { "Canon EOS 30D", 0, 0, { 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } }, { "Canon EOS 40D", 0, 0x3f60, { 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } }, { "Canon EOS 50D", 0, 0x3d93, { 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } }, { "Canon EOS 60D", 0, 0x2ff7, { 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } }, { "Canon EOS 70D", 0, 0x3bc7, { 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } }, { "Canon EOS 100D", 0, 0x350f, { 
6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 300D", 0, 0xfa0, { 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } }, { "Canon EOS 350D", 0, 0xfff, { 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } }, { "Canon EOS 400D", 0, 0xe8e, { 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } }, { "Canon EOS 450D", 0, 0x390d, { 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } }, { "Canon EOS 500D", 0, 0x3479, { 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } }, { "Canon EOS 550D", 0, 0x3dd7, { 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } }, { "Canon EOS 600D", 0, 0x3510, { 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } }, { "Canon EOS 650D", 0, 0x354d, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 700D", 0, 0x3c00, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS 1000D", 0, 0xe43, { 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } }, { "Canon EOS 1100D", 0, 0x3510, { 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } }, { "Canon EOS 1200D", 0, 0x37c2, { 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } }, { "Canon EOS M", 0, 0, { 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } }, { "Canon EOS-1Ds Mark III", 0, 0x3bb0, { 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } }, { "Canon EOS-1Ds Mark II", 0, 0xe80, { 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } }, { "Canon EOS-1D Mark IV", 0, 0x3bb0, { 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } }, { "Canon EOS-1D Mark III", 0, 0x3bb0, { 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } }, { "Canon EOS-1D Mark II N", 0, 0xe80, { 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } }, { "Canon EOS-1D Mark II", 0, 0xe80, { 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } }, { "Canon EOS-1DS", 0, 0xe20, { 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } }, { "Canon EOS-1D C", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D X", 0, 0x3c4e, { 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } }, { "Canon EOS-1D", 0, 0xe20, 
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } }, { "Canon EOS C500", 853, 0, /* DJC */ { 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } }, { "Canon PowerShot A530", 0, 0, { 0 } }, /* don't want the A5 matrix */ { "Canon PowerShot A50", 0, 0, { -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } }, { "Canon PowerShot A5", 0, 0, { -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } }, { "Canon PowerShot G10", 0, 0, { 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } }, { "Canon PowerShot G11", 0, 0, { 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } }, { "Canon PowerShot G12", 0, 0, { 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } }, { "Canon PowerShot G15", 0, 0, { 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } }, { "Canon PowerShot G16", 0, 0, { 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } }, { "Canon PowerShot G1 X Mark II", 0, 0, { 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } }, { "Canon PowerShot G1 X", 0, 0, { 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } }, { "Canon PowerShot G1", 0, 0, { -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } }, { "Canon PowerShot G2", 0, 0, { 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } }, { "Canon PowerShot G3", 0, 0, { 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } }, { "Canon PowerShot G5", 0, 0, { 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } }, { "Canon PowerShot G6", 0, 0, { 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } }, { "Canon PowerShot G7 X", 0, 0, { 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } }, { "Canon PowerShot G9", 0, 0, { 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } }, { "Canon PowerShot Pro1", 0, 0, { 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } }, { "Canon PowerShot Pro70", 34, 0, { -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } }, { "Canon PowerShot Pro90", 0, 0, { -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } }, { "Canon PowerShot S30", 0, 0, { 
10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } }, { "Canon PowerShot S40", 0, 0, { 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } }, { "Canon PowerShot S45", 0, 0, { 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } }, { "Canon PowerShot S50", 0, 0, { 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } }, { "Canon PowerShot S60", 0, 0, { 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } }, { "Canon PowerShot S70", 0, 0, { 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } }, { "Canon PowerShot S90", 0, 0, { 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } }, { "Canon PowerShot S95", 0, 0, { 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } }, { "Canon PowerShot S120", 0, 0, /* LibRaw */ { 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } }, { "Canon PowerShot S110", 0, 0, { 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } }, { "Canon PowerShot S100", 0, 0, { 7968,-2565,-636,-2873,10697,2513,180,667,4211 } }, { "Canon PowerShot SX1 IS", 0, 0, { 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } }, { "Canon PowerShot SX50 HS", 0, 0, { 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } }, { "Canon PowerShot SX60 HS", 0, 0, { 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } }, { "Canon PowerShot A3300", 0, 0, /* DJC */ { 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } }, { "Canon PowerShot A470", 0, 0, /* DJC */ { 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } }, { "Canon PowerShot A610", 0, 0, /* DJC */ { 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } }, { "Canon PowerShot A620", 0, 0, /* DJC */ { 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } }, { "Canon PowerShot A630", 0, 0, /* DJC */ { 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } }, { "Canon PowerShot A640", 0, 0, /* DJC */ { 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } }, { "Canon PowerShot A650", 0, 0, /* DJC */ { 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } }, { "Canon PowerShot A720", 0, 0, /* DJC */ { 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } }, { 
"Canon PowerShot S3 IS", 0, 0, /* DJC */ { 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } }, { "Canon PowerShot SX110 IS", 0, 0, /* DJC */ { 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } }, { "Canon PowerShot SX220", 0, 0, /* DJC */ { 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } }, { "Casio EX-S20", 0, 0, /* DJC */ { 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } }, { "Casio EX-Z750", 0, 0, /* DJC */ { 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } }, { "Casio EX-Z10", 128, 0xfff, /* DJC */ { 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } }, { "CINE 650", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE 660", 0, 0, { 3390,480,-500,-800,3610,340,-550,2336,1192 } }, { "CINE", 0, 0, { 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } }, { "Contax N Digital", 0, 0xf1e, { 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } }, { "Epson R-D1", 0, 0, { 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } }, { "Fujifilm E550", 0, 0, { 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } }, { "Fujifilm E900", 0, 0, { 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } }, { "Fujifilm F5", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F6", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F77", 0, 0xfe9, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm F7", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm F8", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm S100FS", 514, 0, { 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } }, { "Fujifilm S1", 0, 0, { 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } }, { "Fujifilm S20Pro", 0, 0, { 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } }, { "Fujifilm S20", 512, 0x3fff, { 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } }, { "Fujifilm S2Pro", 128, 0, { 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } }, { "Fujifilm S3Pro", 0, 0, { 
11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } }, { "Fujifilm S5Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm S5000", 0, 0, { 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } }, { "Fujifilm S5100", 0, 0, { 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5500", 0, 0, { 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } }, { "Fujifilm S5200", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S5600", 0, 0, { 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } }, { "Fujifilm S6", 0, 0, { 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } }, { "Fujifilm S7000", 0, 0, { 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } }, { "Fujifilm S9000", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9500", 0, 0, { 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } }, { "Fujifilm S9100", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm S9600", 0, 0, { 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } }, { "Fujifilm SL1000", 0, 0, { 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } }, { "Fujifilm IS-1", 0, 0, { 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } }, { "Fujifilm IS Pro", 0, 0, { 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } }, { "Fujifilm HS10 HS11", 0, 0xf68, { 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } }, { "Fujifilm HS2", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS3", 0, 0, { 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } }, { "Fujifilm HS50EXR", 0, 0, { 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } }, { "Fujifilm F900EXR", 0, 0, { 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } }, { "Fujifilm X100S", 0, 0, { 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } }, { "Fujifilm X100T", 0, 0, { 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } }, { "Fujifilm X100", 0, 0, { 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } }, { "Fujifilm X10", 0, 0, { 
13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X20", 0, 0, { 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } }, { "Fujifilm X30", 0, 0, { 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } }, { "Fujifilm X-Pro1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-A1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E1", 0, 0, { 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } }, { "Fujifilm X-E2", 0, 0, { 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } }, { "Fujifilm XF1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X-M1", 0, 0, { 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } }, { "Fujifilm X-S1", 0, 0, { 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } }, { "Fujifilm X-T1", 0, 0, /* LibRaw */ { 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } }, { "Fujifilm XQ1", 0, 0, { 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } }, { "Hasselblad Lunar", -512, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Hasselblad Stellar", -800, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, { "Hasselblad CFV", 0, 0, /* Adobe */ { 8519, -3260, -280, -5081, 13459, 1738, -1449, 2960, 7809, } }, { "Hasselblad H-16MP", 0, 0, /* LibRaw */ { 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } }, { "Hasselblad H-22MP", 0, 0, /* LibRaw */ { 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } }, {"Hasselblad H-31MP",0, 0, /* LibRaw */ { 14480,-5448,-1686,-3534,13123,2260,384,2952,7232 } }, {"Hasselblad H-39MP",0, 0, /* Adobe */ {3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718}}, { "Hasselblad H3D-50", 0, 0, /* Adobe */ {3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718}}, {"Hasselblad H4D-40",0, 0, /* LibRaw */ { 6325,-860,-957,-6559,15945,266,167,770,5936 } }, {"Hasselblad H4D-50",0, 0, /* LibRaw */ { 15283,-6272,-465,-2030,16031,478,-2379,390,7965 } }, {"Hasselblad H4D-60",0, 0, /* Adobe */ {9662, -684, -279, -4903, 12293, 2950, -344, 1669, 
6024}}, {"Hasselblad H5D-50c",0, 0, /* Adobe */ {4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067}}, {"Hasselblad H5D-50",0, 0, /* Adobe */ {5656, -659, -346, -3923, 12306, 1791, -1602, 3509, 5442}}, { "Imacon Ixpress", 0, 0, /* DJC */ { 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } }, { "Kodak NC2000", 0, 0, { 13891,-6055,-803,-465,9919,642,2121,82,1291 } }, { "Kodak DCS315C", -8, 0, { 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } }, { "Kodak DCS330C", -8, 0, { 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } }, { "Kodak DCS420", 0, 0, { 10868,-1852,-644,-1537,11083,484,2343,628,2216 } }, { "Kodak DCS460", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS1", 0, 0, { 10592,-2206,-967,-1944,11685,230,2206,670,1273 } }, { "Kodak EOSDCS3B", 0, 0, { 9898,-2700,-940,-2478,12219,206,1985,634,1031 } }, { "Kodak DCS520C", -178, 0, { 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } }, { "Kodak DCS560C", -177, 0, { 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } }, { "Kodak DCS620C", -177, 0, { 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } }, { "Kodak DCS620X", -176, 0, { 13095,-6231,154,12221,-21,-2137,895,4602,2258 } }, { "Kodak DCS660C", -173, 0, { 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } }, { "Kodak DCS720X", 0, 0, { 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } }, { "Kodak DCS760C", 0, 0, { 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } }, { "Kodak DCS Pro SLR", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14nx", 0, 0, { 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } }, { "Kodak DCS Pro 14", 0, 0, { 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } }, { "Kodak ProBack645", 0, 0, { 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } }, { "Kodak ProBack", 0, 0, { 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } }, { "Kodak P712", 0, 0, { 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } }, { "Kodak P850", 0, 0xf7c, { 
10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } }, { "Kodak P880", 0, 0xfff, { 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } }, { "Kodak EasyShare Z980", 0, 0, { 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } }, { "Kodak EasyShare Z981", 0, 0, { 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } }, { "Kodak EasyShare Z990", 0, 0xfed, { 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } }, { "Kodak EASYSHARE Z1015", 0, 0xef1, { 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } }, { "Leaf CMost", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Valeo 6", 0, 0, { 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } }, { "Leaf Aptus 54S", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Leaf Aptus 65", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf Aptus 75", 0, 0, { 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } }, { "Leaf Credo 40", 0, 0, { 8035, 435, -962, -6001, 13872, 2320, -1159, 3065, 5434 } }, { "Leaf Credo 50", 0, 0, { 3984, 0, 0, 0, 10000, 0, 0, 0, 7666 } }, { "Leaf Credo 60", 0, 0, { 8035, 435, -962, -6001, 13872,2320,-1159,3065,5434} }, { "Leaf Credo 80", 0, 0, { 6294, 686, -712, -5435, 13417, 2211, -1006, 2435, 5042} }, { "Leaf", 0, 0, { 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } }, { "Mamiya ZD", 0, 0, { 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } }, { "Micron 2010", 110, 0, /* DJC */ { 16695,-3761,-2151,155,9682,163,3433,951,4904 } }, { "Minolta DiMAGE 5", 0, 0xf7d, { 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } }, { "Minolta DiMAGE 7Hi", 0, 0xf7d, { 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } }, { "Minolta DiMAGE 7", 0, 0xf7d, { 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } }, { "Minolta DiMAGE A1", 0, 0xf8b, { 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } }, { "Minolta DiMAGE A200", 0, 0, { 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } }, { "Minolta DiMAGE A2", 0, 0xf8f, { 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } }, { 
"Minolta DiMAGE Z2", 0, 0, /* DJC */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Minolta DYNAX 5", 0, 0xffb, { 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } }, { "Minolta DYNAX 7", 0, 0xffb, { 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } }, { "Motorola PIXL", 0, 0, /* DJC */ { 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } }, { "Nikon D100", 0, 0, { 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } }, { "Nikon D1H", 0, 0, { 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } }, { "Nikon D1X", 0, 0, { 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } }, { "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */ { 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } }, { "Nikon D200", 0, 0xfbc, { 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } }, { "Nikon D2H", 0, 0, { 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } }, { "Nikon D2X", 0, 0, { 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } }, { "Nikon D3000", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D3100", 0, 0, { 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } }, { "Nikon D3200", 0, 0xfb9, { 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } }, { "Nikon D3300", 0, 0, { 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } }, { "Nikon D300", 0, 0, { 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } }, { "Nikon D3X", 0, 0, { 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } }, { "Nikon D3S", 0, 0, { 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } }, { "Nikon D3", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D40X", 0, 0, { 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } }, { "Nikon D40", 0, 0, { 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } }, { "Nikon D4S", 0, 0, { 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } }, { "Nikon D4", 0, 0, { 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } }, { "Nikon Df", 0, 0, { 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } }, { "Nikon D5000", 0, 0xf00, { 
7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } }, { "Nikon D5100", 0, 0x3de6, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D5200", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, { "Nikon D5300", 0, 0, { 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } }, { "Nikon D5500", 0, 0, /* DJC */ { 5765,-2176,184,-3736,9072,4664,-1028,2213,9259 } }, { "Nikon D50", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D600", 0, 0x3e07, { 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } }, {"Nikon D610",0, 0, { 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } }, { "Nikon D60", 0, 0, { 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } }, { "Nikon D7000", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon D7100", 0, 0, { 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } }, { "Nikon D750", 0, 0, { 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } }, { "Nikon D700", 0, 0, { 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } }, { "Nikon D70", 0, 0, { 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } }, { "Nikon D810", 0, 0, { 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } }, { "Nikon D800", 0, 0, { 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } }, { "Nikon D80", 0, 0, { 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } }, { "Nikon D90", 0, 0xf00, { 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } }, { "Nikon E700", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E800", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E950", 0, 0x3dd, /* DJC */ { -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } }, { "Nikon E995", 0, 0, /* copied from E5000 */ { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E2100", 0, 0, /* copied from Z2, new white balance */ { 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} }, { "Nikon E2500", 0, 0, { 
-5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E3200", 0, 0, /* DJC */ { 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } }, { "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */ { 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } }, { "Nikon E4500", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5000", 0, 0, { -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } }, { "Nikon E5400", 0, 0, { 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } }, { "Nikon E5700", 0, 0, { -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } }, { "Nikon E8400", 0, 0, { 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } }, { "Nikon E8700", 0, 0, { 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } }, { "Nikon E8800", 0, 0, { 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } }, { "Nikon COOLPIX A", 0, 0, { 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } }, { "Nikon COOLPIX P330", -200, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P340", -200, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P6000", 0, 0, { 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } }, { "Nikon COOLPIX P7000", 0, 0, { 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } }, { "Nikon COOLPIX P7100", 0, 0, { 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } }, { "Nikon COOLPIX P7700", -3200, 0, { 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } }, { "Nikon COOLPIX P7800", -3200, 0, /* LibRaw */ { 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } }, { "Nikon 1 V3", -200, 0, { 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } }, { "Nikon 1 J4", 0, 0, { 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } }, { "Nikon 1 S2", 200, 0, { 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } }, { "Nikon 1 V2", 0, 0, { 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } }, { "Nikon 1 J3", 0, 0, { 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } }, { "Nikon 1 AW1", 0, 0, { 
6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } }, { "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */ { 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } }, { "Olympus C5050", 0, 0, { 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } }, { "Olympus C5060", 0, 0, { 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } }, { "Olympus C7070", 0, 0, { 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } }, { "Olympus C70", 0, 0, { 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } }, { "Olympus C80", 0, 0, { 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } }, { "Olympus E-10", 0, 0xffc, { 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } }, { "Olympus E-1", 0, 0, { 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } }, { "Olympus E-20", 0, 0xffc, { 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } }, { "Olympus E-300", 0, 0, { 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } }, { "Olympus E-330", 0, 0, { 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } }, { "Olympus E-30", 0, 0xfbc, { 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } }, { "Olympus E-3", 0, 0xf99, { 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } }, { "Olympus E-400", 0, 0, { 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } }, { "Olympus E-410", 0, 0xf6a, { 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } }, { "Olympus E-420", 0, 0xfd7, { 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } }, { "Olympus E-450", 0, 0xfd2, { 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } }, { "Olympus E-500", 0, 0, { 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } }, { "Olympus E-510", 0, 0xf6a, { 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } }, { "Olympus E-520", 0, 0xfd2, { 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } }, { "Olympus E-5", 0, 0xeec, { 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } }, { "Olympus E-600", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus E-620", 0, 0xfaf, { 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } }, { "Olympus 
E-P1", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P2", 0, 0xffd, { 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } }, { "Olympus E-P3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-P5", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PL1s", 0, 0, { 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } }, { "Olympus E-PL1", 0, 0, { 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } }, { "Olympus E-PL2", 0, 0xcf3, { 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } }, { "Olympus E-PL3", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PL5", 0, 0xfcb, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PL6", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-PL7", 0, 0, { 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } }, { "Olympus E-PM1", 0, 0, { 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } }, { "Olympus E-PM2", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-M10", 0, 0, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus E-M1", 0, 0, { 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } }, { "Olympus E-M5MarkII", 0, 0, /* DJC */ { 6617,-2589,139,-2917,8499,4419,-884,1913,6829 } }, { "Olympus E-M5", 0, 0xfe1, { 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } }, { "Olympus SP350", 0, 0, { 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } }, { "Olympus SP3", 0, 0, { 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } }, { "Olympus SP500UZ", 0, 0xfff, { 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } }, { "Olympus SP510UZ", 0, 0xffe, { 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } }, { "Olympus SP550UZ", 0, 0xffe, { 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } }, { "Olympus SP560UZ", 0, 0xff9, { 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } }, { "Olympus SP570UZ", 0, 0, { 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } }, {"Olympus 
STYLUS1",0, 0, { 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } }, { "Olympus XZ-10", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "Olympus XZ-1", 0, 0, { 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } }, { "Olympus XZ-2", 0, 0, { 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } }, { "OmniVision", 0, 0, /* DJC */ { 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, { "Pentax *ist DL2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DL", 0, 0, { 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } }, { "Pentax *ist DS2", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Pentax *ist DS", 0, 0, { 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } }, { "Pentax *ist D", 0, 0, { 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } }, { "Pentax K10D", 0, 0, { 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } }, { "Pentax K1", 0, 0, { 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } }, { "Pentax K20D", 0, 0, { 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } }, { "Pentax K200D", 0, 0, { 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } }, { "Pentax K2000", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-m", 0, 0, { 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } }, { "Pentax K-x", 0, 0, { 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } }, { "Pentax K-r", 0, 0, { 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } }, { "Pentax K-3", 0, 0, { 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } }, { "Pentax K-5 II", 0, 0, { 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } }, { "Pentax K-5", 0, 0, { 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } }, { "Pentax K-7", 0, 0, { 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } }, { "Pentax K-S1", 0, 0, { 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } }, { "Pentax MX-1", 0, 0, { 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } }, { "Pentax Q10", 0, 0, { 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } }, { "Pentax 
645D", 0, 0x3e00, { 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } }, { "Panasonic DMC-CM1", -15, 0, { 8770, -3194,-820,-2871,11281,1803,-513,1552,4434} }, { "Panasonic DMC-FZ8", 0, 0xf7f, { 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } }, { "Panasonic DMC-FZ18", 0, 0, { 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } }, { "Panasonic DMC-FZ28", -15, 0xf96, { 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } }, { "Panasonic DMC-FZ30", 0, 0xf94, { 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } }, { "Panasonic DMC-FZ3", -15, 0, { 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } }, { "Panasonic DMC-FZ4", -15, 0, { 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } }, { "Panasonic DMC-FZ50", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Panasonic DMC-FZ7", -15, 0, { 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } }, { "Leica V-LUX1", 0, 0, { 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } }, { "Panasonic DMC-L10", -15, 0xf96, { 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } }, { "Panasonic DMC-L1", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Leica DIGILUX 3", 0, 0xf7f, { 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } }, { "Panasonic DMC-LC1", 0, 0, { 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Leica DIGILUX 2", 0, 0, { 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } }, { "Panasonic DMC-LX100", -15, 0, { 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } }, { "Leica D-LUX (Typ 109)", -15, 0, { 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } }, { "Panasonic DMC-LF1", -15, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Leica C (Typ 112)", -15, 0, { 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } }, { "Panasonic DMC-LX1", 0, 0xf7f, { 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Leica D-Lux (Typ 109)", 0, 0xf7f, /* LibRaw */ { 10031,-4555,-456,-3024,11520,1091,-1342,2611,4752 } }, { "Leica D-LUX2", 0, 0xf7f, { 
10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } }, { "Panasonic DMC-LX2", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Leica D-LUX3", 0, 0, { 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } }, { "Panasonic DMC-LX3", -15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Leica D-LUX 4", -15, 0, { 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } }, { "Panasonic DMC-LX5", -15, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Leica D-LUX 5", -15, 0, { 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } }, { "Panasonic DMC-LX7", -15, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Leica D-LUX 6", -15, 0, { 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } }, { "Panasonic DMC-FZ1000", -15, 0, { 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } }, { "Leica V-LUX (Typ 114)", 15, 0, { 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } }, { "Panasonic DMC-FZ100", -15, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Leica V-LUX 2", -15, 0xfff, { 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } }, { "Panasonic DMC-FZ150", -15, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Leica V-LUX 3", -15, 0xfff, { 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } }, { "Panasonic DMC-FZ200", -15, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Leica V-LUX 4", -15, 0xfff, { 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } }, { "Panasonic DMC-FX150", -15, 0xfff, { 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } }, { "Panasonic DMC-G10", 0, 0, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G1", -15, 0xf94, { 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } }, { "Panasonic DMC-G2", -15, 0xf3c, { 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } }, { "Panasonic DMC-G3", -15, 0xfff, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, { "Panasonic DMC-G5", -15, 0xfff, { 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } }, { "Panasonic 
DMC-G6", -15, 0xfff, { 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } }, { "Panasonic DMC-GF1", -15, 0xf92, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF2", -15, 0xfff, { 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } }, { "Panasonic DMC-GF3", -15, 0xfff, { 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } }, { "Panasonic DMC-GF5", -15, 0xfff, { 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } }, { "Panasonic DMC-GF6", -15, 0, { 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } }, { "Panasonic DMC-GF7", -15, 0, /* DJC */ { 6086,-2691,-18,-4207,9767,4441,-1486,2640,7441 } }, { "Panasonic DMC-GH1", -15, 0xf92, { 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } }, { "Panasonic DMC-GH2", -15, 0xf95, { 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } }, { "Panasonic DMC-GH3", -15, 0, { 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } }, { "Panasonic DMC-GH4", -15, 0, { 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } }, { "Panasonic DMC-GM1", -15, 0, { 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } }, { "Panasonic DMC-GM5", -15, 0, { 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } }, { "Panasonic DMC-GX1", -15, 0, { 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } }, {"Panasonic DMC-GX7", -15,0, /* LibRaw */ {7541,-2355,-591,-3163,10598,1894,-933,2109,5006}}, {"Panasonic DMC-TZ6",-15, 0, { 15964,-8332,-389,1756,7198,383,862,784,1995 } }, {"Panasonic DMC-ZS4",-15, 0, { 15964,-8332,-389,1756,7198,383,862,784,1995 } }, { "Panasonic DMC-TZ7",-15, 0, { 7901,-2472,-600,-3298,10720,2210,-864,2205,5064 } }, { "Panasonic DMC-ZS5",-15, 0, /* same ID as Panasonic DMC-TZ70 */ { 7901,-2472,-600,-3298,10720,2210,-864,2205,5064 } }, { "Phase One H 20", 0, 0, /* DJC */ { 1313,1855,-109,-6715,15908,808,-327,1840,6020 } }, { "Phase One H 25", 0, 0, { 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, {"Phase One IQ250",0, 0, { 4396,-153,-249,-5267,12249,2657,-1397,2323,6014 } }, { "Phase One P 2", 0, 0, { 
2905,732,-237,-8134,16626,1476,-3038,4253,7517 } }, { "Phase One P 30", 0, 0, { 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } }, { "Phase One P 45", 0, 0, { 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } }, { "Phase One P40", 0, 0, { 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } }, { "Phase One P65", 0, 0, { 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } }, { "Red One", 704, 0xffff, /* DJC */ { 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } }, { "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */ { 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } }, { "Samsung EX1", 0, 0x3e00, { 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } }, { "Samsung EX2F", 0, 0x7ff, { 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } }, { "Samsung NX mini", 0, 0, { 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } }, { "Samsung NX3000", 0, 0, { 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } }, { "Samsung NX30", 0, 0, /* NX30, NX300, NX300M */ { 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } }, { "Samsung NX2000", 0, 0, { 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } }, { "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */ { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX1000", 0, 0, { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX1100", 0, 0, { 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } }, { "Samsung NX11", 0, 0, { 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } }, { "Samsung NX10", 0, 0, /* also NX100 */ { 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } }, { "Samsung NX5", 0, 0, { 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } }, { "Samsung NX1", -128, 0, { 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } }, { "Samsung WB2000", 0, 0xfff, { 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } }, { "Samsung GX-1", 0, 0, { 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } }, { "Samsung GX20", 0, 0, /* copied from Pentax K20D */ { 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } }, { "Samsung S85", 0, 0, 
/* DJC */ { 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } }, // Foveon: LibRaw color data {"Sigma dp1 Quattro",2047, 0, { 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } }, {"Sigma dp2 Quattro",2047, 0, { 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } }, { "Sigma SD9", 15, 4095, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, { "Sigma SD10", 15, 16383, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, { "Sigma SD14", 15, 16383, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, { "Sigma SD15", 15, 4095, /* LibRaw */ { 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } }, // Merills + SD1 { "Sigma SD1", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP1 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP2 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, { "Sigma DP3 Merrill", 31, 4095, /* LibRaw */ { 5133,-1895,-353,4978,744,144,3837,3069,2777 } }, // Sigma DP (non-Merill Versions) { "Sigma DP", 0, 4095, /* LibRaw */ // { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } }, { 13100,-3638,-847,6855,2369,580,2723,3218,3251 } }, { "Sinar", 0, 0, /* DJC */ { 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } }, { "Sony DSC-F828", 0, 0, { 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } }, { "Sony DSC-R1", -512, 0, { 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } }, { "Sony DSC-V3", 0, 0, { 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } }, { "Sony DSC-RX100M", -800, 0, /* M2 and M3 */ { 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } }, { "Sony DSC-RX100", -800, 0, { 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } }, {"Sony DSC-RX10",0, 0, { 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } }, { "Sony DSC-RX1R", -512, 0, { 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } }, { "Sony DSC-RX1", -512, 0, { 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } }, { "Sony 
DSLR-A100", 0, 0xfeb, { 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } }, { "Sony DSLR-A290", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A2", 0, 0, { 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } }, { "Sony DSLR-A300", 0, 0, { 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } }, { "Sony DSLR-A330", 0, 0, { 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } }, { "Sony DSLR-A350", 0, 0xffc, { 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } }, { "Sony DSLR-A380", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A390", 0, 0, { 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } }, { "Sony DSLR-A450", -512, 0xfeb, { 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } }, { "Sony DSLR-A580", -512, 0xfeb, { 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } }, { "Sony DSLR-A500", -512, 0xfeb, { 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } }, { "Sony DSLR-A5", -512, 0xfeb, { 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } }, { "Sony DSLR-A700", -512, 0, { 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } }, { "Sony DSLR-A850", -512, 0, { 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } }, { "Sony DSLR-A900", -512, 0, { 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } }, { "Sony ILCA-77M2", -512, 0, { 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } }, { "Sony ILCE-7M2", -512, 0, { 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } }, { "Sony ILCE-7S", -512, 0, { 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } }, { "Sony ILCE-7R", -512, 0, { 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } }, { "Sony ILCE-7", -512, 0, { 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } }, { "Sony ILCE", -512, 0, /* 3000, 5000, 5100, 6000, and QX1 */ { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony NEX-5N", -512, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony NEX-5R", -512, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-5T", -512, 0, { 
6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-3N", -512, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-3", -512, 0, /* Adobe */ { 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } }, { "Sony NEX-5", -512, 0, /* Adobe */ { 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } }, { "Sony NEX-6", -512, 0, { 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } }, { "Sony NEX-7", -512, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony NEX", -512, 0, /* NEX-C3, NEX-F3 */ { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A33", -512, 0, { 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } }, { "Sony SLT-A35", -512, 0, { 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } }, { "Sony SLT-A37", -512, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A55", -512, 0, { 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } }, { "Sony SLT-A57", -512, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A58", -512, 0, { 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } }, { "Sony SLT-A65", -512, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony SLT-A77", -512, 0, { 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } }, { "Sony SLT-A99", -512, 0, { 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } }, }; double cam_xyz[4][3]; char name[130]; int i, j; int bl4=(cblack[0]+cblack[1]+cblack[2]+cblack[3])/4,bl64=0; if(cblack[4]*cblack[5]>0) { for (unsigned c = 0; c < 4096 && c < cblack[4]*cblack[5]; c++) bl64+=cblack[c+6]; bl64 /= cblack[4]*cblack[5]; } int rblack = black+bl4+bl64; sprintf (name, "%s %s", t_make, t_model); for (i=0; i < sizeof table / sizeof *table; i++) if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) { if (table[i].t_black>0) { black = (ushort) table[i].t_black; memset(cblack,0,sizeof(cblack)); } else if(table[i].t_black <0 && rblack == 0 ) { black = (ushort) (-table[i].t_black); memset(cblack,0,sizeof(cblack)); } if (table[i].t_maximum) 
/* --- tail of adobe_coeff(): apply the matched color-matrix table entry.
   NOTE(review): this function begins before this chunk of the file; only
   its epilogue is visible here.  At this point `i` indexes the matching
   table entry and `rblack`/`black`/`cblack` were set just above. --- */
      /* A nonzero t_maximum overrides the detected saturation level. */
      maximum = (ushort) table[i].t_maximum;
      if (table[i].trans[0]) {
	/* Copy the 3x4 camera-to-XYZ matrix (stored x10000) and clear
	   raw_color so the matrix is actually used downstream. */
	for (raw_color = j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
	  /* Library build: always record the matrix in imgdata.color;
	     when internal_only is set, skip the cam_xyz_coeff() call. */
	  if(internal_only)
	    imgdata.color.cam_xyz[0][j] = table[i].trans[j] / 10000.0;
	  else
	    imgdata.color.cam_xyz[0][j] =
#endif
	  cam_xyz[0][j] = table[i].trans[j] / 10000.0;
#ifdef LIBRAW_LIBRARY_BUILD
	if(!internal_only)
#endif
	cam_xyz_coeff (rgb_cam, cam_xyz);
      }
      break;	/* first prefix match wins */
    }
}

/*
   Install one of four hard-coded RGB conversion matrices for cameras
   whose color data is not in the Adobe table.  Setting raw_color = 0
   (via the for-initializer) tells later stages to apply rgb_cam.
   index selects the row; rows 0 and 2 are 3x3, rows 1 and 3 are 3x4
   (colors/FORCC decide how many coefficients are read per row).
 */
void CLASS simple_coeff (int index)
{
  static const float table[][12] = {
  /* index 0 -- all Foveon cameras */
  { 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
  /* index 1 -- Kodak DC20 and DC25 */
  { 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
  /* index 2 -- Logitech Fotoman Pixtura */
  { 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
  /* index 3 -- Nikon E880, E900, and E990 */
  { -1.936280, 1.800443, -1.448486, 2.584324, 1.405365, -0.524955,
    -0.289090, 0.408680, -1.204965, 1.082304, 2.941367, -1.818705 } };
  int i, c;

  for (raw_color = i=0; i < 3; i++)
    FORCC rgb_cam[i][c] = table[index][i*colors+c];
}

/*
   Guess the byte order of `words` 16-bit samples at the current file
   position.  Heuristic: accumulate squared differences between samples
   two words apart under both byte-order interpretations (msb = 0/1);
   the interpretation with the smaller total variation wins.
   Returns 0x4949 ("II", little-endian) or 0x4d4d ("MM", big-endian).
 */
short CLASS guess_byte_order (int words)
{
  uchar test[4][2];
  int t=2, msb;
  double diff, sum[2] = {0,0};

  fread (test[0], 2, 2, ifp);
  for (words-=2; words--; ) {
    fread (test[t], 2, 1, ifp);
    for (msb=0; msb < 2; msb++) {
      /* test[t^2] is the sample read two iterations earlier (4-slot
	 ring buffer); compare it against the current sample. */
      diff = (test[t^2][msb] << 8 | test[t^2][!msb]) -
	     (test[t ][msb] << 8 | test[t ][!msb]);
      sum[msb] += diff*diff;
    }
    t = (t+1) & 3;
  }
  return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}

/*
   Estimate the green-channel balance between two interleaved raw rows.
   bps  -- bits per sample; bite -- bits fetched per refill step;
   off0/off1 -- file offsets of the two rows.  Each row is unpacked
   through a 64-bit bit buffer into img[0]/img[1] (width <= 2064 is
   assumed -- NOTE(review): no bounds check against the 2064 element
   arrays; confirm callers guarantee this).  Returns 100*log of the
   ratio of diagonal-difference sums, used to pick a green pattern.
 */
float CLASS find_green (int bps, int bite, int off0, int off1)
{
  UINT64 bitbuf=0;
  int vbits, col, i, c;
  ushort img[2][2064];
  double sum[]={0,0};

  FORC(2) {
    fseek (ifp, c ? off1:off0, SEEK_SET);
    for (vbits=col=0; col < width; col++) {
      /* Refill the bit buffer until at least bps bits are available;
	 bytes are inserted little-endian within each `bite` chunk. */
      for (vbits -= bps; vbits < 0; vbits += bite) {
	bitbuf <<= bite;
	for (i=0; i < bite; i+=8)
	  bitbuf |= (unsigned) (fgetc(ifp) << i);
      }
      /* Extract the top `bps` bits of the valid window. */
      img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
    }
  }
  /* Compare the two diagonal neighbor pairings across the row pair. */
  FORC(width-1) {
    sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
    sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
  }
  return 100 * log(sum[0]/sum[1]);
}

/*
   Identify which camera created this file, and set global variables
   accordingly.
   NOTE(review): identify() continues far past this chunk; only its
   opening tables are visible here.
 */
void CLASS identify()
{
  /* Panasonic sensor geometry table -- presumably
     { raw_width, raw_height, margin/crop adjustments } per mode;
     confirm against the lookup code later in identify(). */
  static const short pana[][6] = {
    { 3130, 1743,  4,  0, -6,  0 },
    { 3130, 2055,  4,  0, -6,  0 },
    { 3130, 2319,  4,  0, -6,  0 },
    { 3170, 2103, 18,  0,-42, 20 },
    { 3170, 2367, 18, 13,-42,-21 },
    { 3177, 2367,  0,  0, -1,  0 },
    { 3304, 2458,  0,  0, -1,  0 },
    { 3330, 2463,  9,  0, -5,  0 },
    { 3330, 2479,  9,  0,-17,  4 },
    { 3370, 1899, 15,  0,-44, 20 },
    { 3370, 2235, 15,  0,-44, 20 },
    { 3370, 2511, 15, 10,-44,-21 },
    { 3690, 2751,  3,  0, -8, -3 },
    { 3710, 2751,  0,  0, -3,  0 },
    { 3724, 2450,  0,  0,  0, -2 },
    { 3770, 2487, 17,  0,-44, 19 },
    { 3770, 2799, 17, 15,-44,-19 },
    { 3880, 2170,  6,  0, -6,  0 },
    { 4060, 3018,  0,  0,  0, -2 },
    { 4290, 2391,  3,  0, -8, -1 },
    { 4330, 2439, 17, 15,-44,-19 },
    { 4508, 2962,  0,  0, -3, -4 },
    { 4508, 3330,  0,  0, -3, -6 },
  };
  /* Canon sensor geometry table, keyed by raw dimensions -- presumably
     { raw_width, raw_height, left/top margins, ... }; confirm against
     the lookup code later in identify().  (Table continues beyond this
     chunk.) */
  static const ushort canon[][11] = {
    { 1944, 1416,   0,  0, 48,  0 },
    { 2144, 1560,   4,  8, 52,  2, 0, 0, 0, 25 },
    { 2224, 1456,  48,  6,  0,  2 },
    { 2376, 1728,  12,  6, 52,  2 },
    { 2672, 1968,  12,  6, 44,  2 },
    { 3152, 2068,  64, 12,  0,  0, 16 },
    { 3160, 2344,  44, 12,  4,  4 },
    { 3344, 2484,   4,  6, 52,  6 },
    { 3516, 2328,  42, 14,  0,  0 },
    { 3596, 2360,  74, 12,  0,  0 },
    { 3744, 2784,  52, 12,  8, 12 },
    { 3944, 2622,  30, 18,  6,  2 },
    { 3948, 2622,  42, 18,  0,  2 },
    { 3984, 2622,  76, 20,  0,  2, 14 },
    { 4104, 3048,  48, 12, 24, 12 },
    { 4116, 2178,   4,  2,  0,  0 },
    { 4152, 2772, 192, 12,  0,  0 },
    { 4160, 3124, 104, 11,  8, 65 },
    { 4176, 3062,  96, 17,  8,  0, 0, 16, 0,  7, 0x49 },
    { 4192, 3062,  96, 17, 24,  0, 0, 16, 0,  0, 0x49 },
    { 4312, 2876,  22, 18,  0,  2 },
    { 4352, 2874,  62, 18,  0,  0 },
    { 4476, 2954,  90,
34, 0, 0 }, { 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 }, { 4480, 3366, 80, 50, 0, 0 }, { 4496, 3366, 80, 50, 12, 0 }, { 4768, 3516, 96, 16, 0, 0, 0, 16 }, { 4832, 3204, 62, 26, 0, 0 }, { 4832, 3228, 62, 51, 0, 0 }, { 5108, 3349, 98, 13, 0, 0 }, { 5120, 3318, 142, 45, 62, 0 }, { 5280, 3528, 72, 52, 0, 0 }, { 5344, 3516, 142, 51, 0, 0 }, { 5344, 3584, 126,100, 0, 2 }, { 5360, 3516, 158, 51, 0, 0 }, { 5568, 3708, 72, 38, 0, 0 }, { 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 }, { 5712, 3774, 62, 20, 10, 2 }, { 5792, 3804, 158, 51, 0, 0 }, { 5920, 3950, 122, 80, 2, 0 }, }; static const struct { ushort id; char t_model[20]; } unique[] = { { 0x001, "EOS-1D" }, { 0x167, "EOS-1DS" }, { 0x168, "EOS 10D" }, { 0x169, "EOS-1D Mark III" }, { 0x170, "EOS 300D" }, { 0x174, "EOS-1D Mark II" }, { 0x175, "EOS 20D" }, { 0x176, "EOS 450D" }, { 0x188, "EOS-1Ds Mark II" }, { 0x189, "EOS 350D" }, { 0x190, "EOS 40D" }, { 0x213, "EOS 5D" }, { 0x215, "EOS-1Ds Mark III" }, { 0x218, "EOS 5D Mark II" }, { 0x232, "EOS-1D Mark II N" }, { 0x234, "EOS 30D" }, { 0x236, "EOS 400D" }, { 0x250, "EOS 7D" }, { 0x252, "EOS 500D" }, { 0x254, "EOS 1000D" }, { 0x261, "EOS 50D" }, { 0x269, "EOS-1D X" }, { 0x270, "EOS 550D" }, { 0x281, "EOS-1D Mark IV" }, { 0x285, "EOS 5D Mark III" }, { 0x286, "EOS 600D" }, { 0x287, "EOS 60D" }, { 0x288, "EOS 1100D" }, { 0x289, "EOS 7D Mark II" }, { 0x301, "EOS 650D" }, { 0x302, "EOS 6D" }, { 0x324, "EOS-1D C" }, { 0x325, "EOS 70D" }, { 0x326, "EOS 700D" }, { 0x327, "EOS 1200D" }, { 0x331, "EOS M" }, { 0x335, "EOS M2" }, { 0x346, "EOS 100D" }, { 0x347, "EOS 760D" }, { 0x382, "EOS 5DS" }, { 0x393, "EOS 750D" }, { 0x401, "EOS 5DS R" }, }, sonique[] = { { 0x002, "DSC-R1" }, { 0x100, "DSLR-A100" }, { 0x101, "DSLR-A900" }, { 0x102, "DSLR-A700" }, { 0x103, "DSLR-A200" }, { 0x104, "DSLR-A350" }, { 0x105, "DSLR-A300" }, {262,"DSLR-A900"}, {263,"DSLR-A380"}, { 0x108, "DSLR-A330" }, { 0x109, "DSLR-A230" }, { 0x10a, "DSLR-A290" }, { 0x10d, "DSLR-A850" }, {270,"DSLR-A850"}, { 0x111, 
"DSLR-A550" }, { 0x112, "DSLR-A500" }, { 0x113, "DSLR-A450" }, { 0x116, "NEX-5" }, { 0x117, "NEX-3" }, { 0x118, "SLT-A33" }, { 0x119, "SLT-A55V" }, { 0x11a, "DSLR-A560" }, { 0x11b, "DSLR-A580" }, { 0x11c, "NEX-C3" }, { 0x11d, "SLT-A35" }, { 0x11e, "SLT-A65V" }, { 0x11f, "SLT-A77V" }, { 0x120, "NEX-5N" }, { 0x121, "NEX-7" }, {290,"NEX-VG20E"}, { 0x123, "SLT-A37" }, { 0x124, "SLT-A57" }, { 0x125, "NEX-F3" }, { 0x126, "SLT-A99V" }, { 0x127, "NEX-6" }, { 0x128, "NEX-5R" }, { 0x129, "DSC-RX100" }, { 0x12a, "DSC-RX1" }, {299,"NEX-VG900"}, {300,"NEX-VG30E"}, { 0x12e, "ILCE-3000" }, { 0x12f, "SLT-A58" }, { 0x131, "NEX-3N" }, { 0x132, "ILCE-7" }, { 0x133, "NEX-5T" }, { 0x134, "DSC-RX100M2" }, { 0x135, "DSC-RX10" }, { 0x136, "DSC-RX1R" }, { 0x137, "ILCE-7R" }, { 0x138, "ILCE-6000" }, { 0x139, "ILCE-5000" }, { 0x13d, "DSC-RX100M3" }, { 0x13e, "ILCE-7S" }, { 0x13f, "ILCA-77M2" }, { 0x153, "ILCE-5100" }, { 0x154, "ILCE-7M2" }, { 0x15a, "ILCE-QX1" }, }; static const struct { unsigned fsize; ushort rw, rh; uchar lm, tm, rm, bm, lf, cf, max, flags; char t_make[10], t_model[20]; ushort offset; } table[] = { { 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" }, { 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" }, { 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" }, { 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" }, { 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 }, { 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" }, { 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 }, { 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" }, { 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" }, { 9631728,2532,1902, 0, 0, 0, 0,96,0x61,0,0,"Alcatel","5035D" }, // Android Raw dumps id start // File Size in bytes Horizontal Res Vertical Flag then bayer order eg 0x16 bbgr 0x94 rggb { 16424960,4208,3120, 0, 0, 0, 0, 1,0x16,0,0,"Sony","IMX135-mipi 13mp" }, { 17522688,4212,3120, 0, 0, 0, 0, 
0,0x16,0,0,"Sony","IMX135-QCOM" }, { 10223360,2608,1960, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX072-mipi" }, { 5107712,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"HTC","UltraPixel" }, { 1540857,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","S3" }, { 10223363,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","GalaxyNexus" }, // Android Raw dumps id end { 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 }, { 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" }, { 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" }, { 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" }, { 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" }, { 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" }, { 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" }, { 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" }, { 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" }, { 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" }, { 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" }, { 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" }, { 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" }, { 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" }, { 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" }, { 19131120,4168,3060,92,16, 4, 1,40,0x94,0,2,"Canon","PowerShot SX220 HS" }, { 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" }, { 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" }, { 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" }, { 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" }, { 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" }, { 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" }, { 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" }, { 
4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" }, { 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" }, { 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" }, { 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" }, { 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" }, { 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" }, { 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" }, { 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" }, { 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" }, { 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" }, { 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" }, { 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" }, { 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" }, { 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" }, { 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" }, { 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" }, { 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" }, { 28829184,4384,3288, 0, 0, 0, 0,36,0x61,0,0,"DJI" }, { 15151104,4608,3288, 0, 0, 0, 0, 0,0x94,0,0,"Matrix" }, { 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" }, { 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic" }, { 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" }, { 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" }, { 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" }, { 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 }, { 2247168,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" }, { 3370752,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" }, { 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" }, { 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 }, { 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" }, { 9116448,2848,2134, 0, 0, 0, 
0, 0,0x00,0,0,"Kodak","C603" }, { 12241200,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP" }, { 12272756,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP",31556 }, { 18000000,4000,3000, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","12MP" }, { 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" }, { 15360000,3200,2400, 0, 0, 0, 0,96,0x16,0,0,"Lenovo","A820" }, { 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 }, { 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 }, { 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" }, { 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" }, { 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" }, { 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" }, { 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" }, { 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" }, { 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" }, { 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" }, { 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" }, { 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" }, { 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" }, { 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" }, { 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" }, { 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" }, { 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" }, { 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" }, { 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" }, { 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" }, { 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" }, { 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" }, { 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" }, { 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 }, { 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 }, { 
44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 }, { 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" }, { 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" }, }; static const char *corp[] = { "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm", "Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica", "Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh", "Samsung", "Sigma", "Sinar", "Sony" }; char head[32], *cp; int hlen, flen, fsize, zero_fsize=1, i, c; struct jhead jh; tiff_flip = flip = filters = UINT_MAX; /* unknown */ raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0; maximum = height = width = top_margin = left_margin = 0; cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0; iso_speed = shutter = aperture = focal_len = unique_id = 0; tiff_nifds = 0; memset (tiff_ifd, 0, sizeof tiff_ifd); memset (gpsdata, 0, sizeof gpsdata); memset (cblack, 0, sizeof cblack); memset (white, 0, sizeof white); memset (mask, 0, sizeof mask); thumb_offset = thumb_length = thumb_width = thumb_height = 0; load_raw = thumb_load_raw = 0; write_thumb = &CLASS jpeg_thumb; data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0; kodak_cbpp = zero_after_ff = dng_version = load_flags = 0; timestamp = shot_order = tiff_samples = black = is_foveon = 0; mix_green = profile_length = data_error = zero_is_bad = 0; pixel_aspect = is_raw = raw_color = 1; tile_width = tile_length = 0; for (i=0; i < 4; i++) { cam_mul[i] = i == 1; pre_mul[i] = i < 3; FORC3 cmatrix[c][i] = 0; FORC3 rgb_cam[c][i] = c == i; } colors = 3; for (i=0; i < 0x10000; i++) curve[i] = i; order = get2(); hlen = get4(); fseek (ifp, 0, SEEK_SET); fread (head, 1, 32, ifp); fseek (ifp, 0, SEEK_END); flen = fsize = ftell(ifp); if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) || (cp = (char *) memmem (head, 32, (char*)"IIII", 4))) { parse_phase_one (cp-head); if (cp-head && parse_tiff(0)) apply_tiff(); } else if (order == 0x4949 || 
order == 0x4d4d) { if (!memcmp (head+6,"HEAPCCDR",8)) { data_offset = hlen; #ifdef LIBRAW_LIBRARY_BUILD imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; #endif parse_ciff (hlen, flen-hlen, 0); load_raw = &CLASS canon_load_raw; } else if (parse_tiff(0)) apply_tiff(); } else if (!memcmp (head,"\xff\xd8\xff\xe1",4) && !memcmp (head+6,"Exif",4)) { fseek (ifp, 4, SEEK_SET); data_offset = 4 + get2(); fseek (ifp, data_offset, SEEK_SET); if (fgetc(ifp) != 0xff) parse_tiff(12); thumb_offset = 0; } else if (!memcmp (head+25,"ARECOYK",7)) { strcpy (make, "Contax"); strcpy (model,"N Digital"); fseek (ifp, 33, SEEK_SET); get_timestamp(1); fseek (ifp, 52, SEEK_SET); switch (get4()) { case 7: iso_speed = 25; break; case 8: iso_speed = 32; break; case 9: iso_speed = 40; break; case 10: iso_speed = 50; break; case 11: iso_speed = 64; break; case 12: iso_speed = 80; break; case 13: iso_speed = 100; break; case 14: iso_speed = 125; break; case 15: iso_speed = 160; break; case 16: iso_speed = 200; break; case 17: iso_speed = 250; break; case 18: iso_speed = 320; break; case 19: iso_speed = 400; break; } shutter = powf64(2.0f, (((float)get4())/8.0f)) / 16000.0f; FORC4 cam_mul[c ^ (c >> 1)] = get4(); fseek (ifp, 88, SEEK_SET); aperture = powf64(2.0f, ((float)get4())/16.0f); fseek (ifp, 112, SEEK_SET); focal_len = get4(); #ifdef LIBRAW_LIBRARY_BUILD fseek (ifp, 104, SEEK_SET); imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, ((float)get4())/16.0f); fseek (ifp, 124, SEEK_SET); fread(imgdata.lens.makernotes.Lens, 32, 1, ifp); imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Contax_N; if (imgdata.lens.makernotes.Lens[0]) imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Contax_N; #endif } else if (!strcmp (head, "PXN")) { strcpy (make, "Logitech"); strcpy (model,"Fotoman Pixtura"); } else if (!strcmp (head, "qktk")) { strcpy (make, "Apple"); strcpy (model,"QuickTake 100"); load_raw = &CLASS 
quicktake_100_load_raw; } else if (!strcmp (head, "qktn")) { strcpy (make, "Apple"); strcpy (model,"QuickTake 150"); load_raw = &CLASS kodak_radc_load_raw; } else if (!memcmp (head,"FUJIFILM",8)) { fseek (ifp, 84, SEEK_SET); thumb_offset = get4(); thumb_length = get4(); fseek (ifp, 92, SEEK_SET); parse_fuji (get4()); if (thumb_offset > 120) { fseek (ifp, 120, SEEK_SET); is_raw += (i = get4()) && 1; if (is_raw == 2 && shot_select) parse_fuji (i); } load_raw = &CLASS unpacked_load_raw; fseek (ifp, 100+28*(shot_select > 0), SEEK_SET); parse_tiff (data_offset = get4()); parse_tiff (thumb_offset+12); apply_tiff(); } else if (!memcmp (head,"RIFF",4)) { fseek (ifp, 0, SEEK_SET); parse_riff(); } else if (!memcmp (head+4,"ftypqt ",9)) { fseek (ifp, 0, SEEK_SET); parse_qt (fsize); is_raw = 0; } else if (!memcmp (head,"\0\001\0\001\0@",6)) { fseek (ifp, 6, SEEK_SET); fread (make, 1, 8, ifp); fread (model, 1, 8, ifp); fread (model2, 1, 16, ifp); data_offset = get2(); get2(); raw_width = get2(); raw_height = get2(); load_raw = &CLASS nokia_load_raw; filters = 0x61616161; } else if (!memcmp (head,"NOKIARAW",8)) { strcpy (make, "NOKIA"); order = 0x4949; fseek (ifp, 300, SEEK_SET); data_offset = get4(); i = get4(); width = get2(); height = get2(); switch (tiff_bps = i*8 / (width * height)) { case 8: load_raw = &CLASS eight_bit_load_raw; break; case 10: load_raw = &CLASS nokia_load_raw; } raw_height = height + (top_margin = i / (width * tiff_bps/8) - height); mask[0][3] = 1; filters = 0x61616161; } else if (!memcmp (head,"ARRI",4)) { order = 0x4949; fseek (ifp, 20, SEEK_SET); width = get4(); height = get4(); strcpy (make, "ARRI"); fseek (ifp, 668, SEEK_SET); fread (model, 1, 64, ifp); data_offset = 4096; load_raw = &CLASS packed_load_raw; load_flags = 88; filters = 0x61616161; } else if (!memcmp (head,"XPDS",4)) { order = 0x4949; fseek (ifp, 0x800, SEEK_SET); fread (make, 1, 41, ifp); raw_height = get2(); raw_width = get2(); fseek (ifp, 56, SEEK_CUR); fread (model, 1, 30, ifp); 
data_offset = 0x10000; load_raw = &CLASS canon_rmf_load_raw; gamma_curve (0, 12.25, 1, 1023); } else if (!memcmp (head+4,"RED1",4)) { strcpy (make, "Red"); strcpy (model,"One"); parse_redcine(); load_raw = &CLASS redcine_load_raw; gamma_curve (1/2.4, 12.92, 1, 4095); filters = 0x49494949; } else if (!memcmp (head,"DSC-Image",9)) parse_rollei(); else if (!memcmp (head,"PWAD",4)) parse_sinar_ia(); else if (!memcmp (head,"\0MRM",4)) parse_minolta(0); else if (!memcmp (head,"FOVb",4)) { #ifdef LIBRAW_LIBRARY_BUILD #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) parse_foveon(); else #endif parse_x3f(); #else #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 parse_foveon(); #endif #endif } else if (!memcmp (head,"CI",2)) parse_cine(); else for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++) if (fsize == table[i].fsize) { strcpy (make, table[i].t_make ); #ifdef LIBRAW_LIBRARY_BUILD if (!strcmp(make, "Canon")) { imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens; } #endif strcpy (model, table[i].t_model); flip = table[i].flags >> 2; zero_is_bad = table[i].flags & 2; if (table[i].flags & 1) parse_external_jpeg(); data_offset = table[i].offset; raw_width = table[i].rw; raw_height = table[i].rh; left_margin = table[i].lm; top_margin = table[i].tm; width = raw_width - left_margin - table[i].rm; height = raw_height - top_margin - table[i].bm; filters = 0x1010101 * table[i].cf; colors = 4 - !((filters & filters >> 1) & 0x5555); load_flags = table[i].lf; switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) { case 6: load_raw = &CLASS minolta_rd175_load_raw; break; case 8: load_raw = &CLASS eight_bit_load_raw; break; case 10: if ((fsize-data_offset)/raw_height*3 >= raw_width*4) { load_raw = &CLASS android_loose_load_raw; break; } else if (load_flags & 1) { load_raw = &CLASS android_tight_load_raw; break; } case 12: load_flags |= 128; load_raw = &CLASS packed_load_raw; break; case 16: 
order = 0x4949 | 0x404 * (load_flags & 1); tiff_bps -= load_flags >> 4; tiff_bps -= load_flags = load_flags >> 1 & 7; load_raw = &CLASS unpacked_load_raw; } maximum = (1 << tiff_bps) - (1 << table[i].max); } if (zero_fsize) fsize = 0; if (make[0] == 0) parse_smal (0, flen); if (make[0] == 0) { parse_jpeg(0); fseek(ifp,0,SEEK_END); int sz = ftell(ifp); if (!(strncmp(model,"ov",2) && strncmp(model,"RP_OV",5)) && sz>=6404096 && !fseek (ifp, -6404096, SEEK_END) && fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) { strcpy (make, "OmniVision"); data_offset = ftell(ifp) + 0x8000-32; width = raw_width; raw_width = 2611; load_raw = &CLASS nokia_load_raw; filters = 0x16161616; } else is_raw = 0; } for (i=0; i < sizeof corp / sizeof *corp; i++) if (strcasestr (make, corp[i])) /* Simplify company names */ strcpy (make, corp[i]); if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) && ((cp = strcasestr(model," DIGITAL CAMERA")) || (cp = strstr(model,"FILE VERSION")))) *cp = 0; if (!strncasecmp(model,"PENTAX",6)) strcpy (make, "Pentax"); cp = make + strlen(make); /* Remove trailing spaces */ while (*--cp == ' ') *cp = 0; cp = model + strlen(model); while (*--cp == ' ') *cp = 0; i = strlen(make); /* Remove make from model */ if (!strncasecmp (model, make, i) && model[i++] == ' ') memmove (model, model+i, 64-i); if (!strncmp (model,"FinePix ",8)) strcpy (model, model+8); if (!strncmp (model,"Digital Camera ",15)) strcpy (model, model+15); desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; if (!is_raw) goto notraw; if (!height) height = raw_height; if (!width) width = raw_width; if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */ { height = 2616; width = 3896; } if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */ { height = 3124; width = 4688; filters = 0x16161616; } if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x"))) { width = 4309; filters = 0x16161616; } if (width >= 4960 && !strncmp(model,"K-5",3)) { 
left_margin = 10; width = 4950; filters = 0x16161616; } if (width == 4736 && !strcmp(model,"K-7")) { height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; } if (width == 6080 && !strcmp(model,"K-3")) { left_margin = 4; width = 6040; } if (width == 7424 && !strcmp(model,"645D")) { height = 5502; width = 7328; filters = 0x61616161; top_margin = 29; left_margin = 48; } if (height == 3014 && width == 4096) /* Ricoh GX200 */ width = 4014; if (dng_version) { if (filters == UINT_MAX) filters = 0; if (filters) is_raw = tiff_samples; else colors = tiff_samples; switch (tiff_compress) { case 0: /* Compression not set, assuming uncompressed */ case 1: load_raw = &CLASS packed_dng_load_raw; break; case 7: load_raw = &CLASS lossless_dng_load_raw; break; case 34892: load_raw = &CLASS lossy_dng_load_raw; break; default: load_raw = 0; } if (!strcmp(make, "Canon") && unique_id) { for (i = 0; i < sizeof unique / sizeof *unique; i++) if (unique_id == 0x80000000 + unique[i].id) { strcpy(model, unique[i].t_model); break; } } if (!strcasecmp(make, "Sony") && unique_id) { for (i = 0; i < sizeof sonique / sizeof *sonique; i++) if (unique_id == sonique[i].id) { strcpy(model, sonique[i].t_model); break; } } goto dng_skip; } if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) { if (!load_raw) load_raw = &CLASS lossless_jpeg_load_raw; for (i=0; i < sizeof canon / sizeof *canon; i++) if (raw_width == canon[i][0] && raw_height == canon[i][1]) { width = raw_width - (left_margin = canon[i][2]); height = raw_height - (top_margin = canon[i][3]); width -= canon[i][4]; height -= canon[i][5]; mask[0][1] = canon[i][6]; mask[0][3] = -canon[i][7]; mask[1][1] = canon[i][8]; mask[1][3] = -canon[i][9]; if (canon[i][10]) filters = canon[i][10] * 0x01010101; } if ((unique_id | 0x20000) == 0x2720000) { left_margin = 8; top_margin = 16; } } if (!strcmp(make,"Canon") && unique_id) { for (i=0; i < sizeof unique / sizeof *unique; i++) if (unique_id == 0x80000000 + unique[i].id) { adobe_coeff 
("Canon", unique[i].t_model); strcpy(model,unique[i].t_model); } } if (!strcasecmp(make,"Sony") && unique_id) { for (i=0; i < sizeof sonique / sizeof *sonique; i++) if (unique_id == sonique[i].id) { adobe_coeff ("Sony", sonique[i].t_model); strcpy(model,sonique[i].t_model); } } if (!strcmp(make,"Nikon")) { if (!load_raw) load_raw = &CLASS packed_load_raw; if (model[0] == 'E') load_flags |= !data_offset << 2 | 2; } /* Set parameters based on camera name (for non-DNG files). */ if (!strcmp(model,"KAI-0340") && find_green (16, 16, 3840, 5120) < 25) { height = 480; top_margin = filters = 0; strcpy (model,"C603"); } if (is_foveon) { if (height*2 < width) pixel_aspect = 0.5; if (height > width) pixel_aspect = 2; filters = 0; #ifdef LIBRAW_DEMOSAIC_PACK_GPL2 if(!imgdata.params.force_foveon_x3f) simple_coeff(0); #endif } else if (!strcmp(make,"Canon") && tiff_bps == 15) { switch (width) { case 3344: width -= 66; case 3872: width -= 6; } if (height > width) SWAP(height,width); filters = 0; tiff_samples = colors = 3; load_raw = &CLASS canon_sraw_load_raw; } else if (!strcmp(model,"PowerShot 600")) { height = 613; width = 854; raw_width = 896; colors = 4; filters = 0xe1e4e1e4; load_raw = &CLASS canon_600_load_raw; } else if (!strcmp(model,"PowerShot A5") || !strcmp(model,"PowerShot A5 Zoom")) { height = 773; width = 960; raw_width = 992; pixel_aspect = 256/235.0; filters = 0x1e4e1e4e; goto canon_a5; } else if (!strcmp(model,"PowerShot A50")) { height = 968; width = 1290; raw_width = 1320; filters = 0x1b4e4b1e; goto canon_a5; } else if (!strcmp(model,"PowerShot Pro70")) { height = 1024; width = 1552; filters = 0x1e4b4e1b; canon_a5: colors = 4; tiff_bps = 10; load_raw = &CLASS packed_load_raw; load_flags = 40; } else if (!strcmp(model,"PowerShot Pro90 IS") || !strcmp(model,"PowerShot G1")) { colors = 4; filters = 0xb4b4b4b4; } else if (!strcmp(model,"PowerShot A610")) { if (canon_s2is()) strcpy (model+10, "S2 IS"); } else if (!strcmp(model,"PowerShot SX220 HS")) { mask[1][3] = 
-4; top_margin=16; left_margin = 92; } else if (!strcmp(model,"PowerShot S120")) { raw_width = 4192; raw_height = 3062; width = 4022; height = 3016; mask[0][0] = top_margin = 31; mask[0][2] = top_margin + height; left_margin = 120; mask[0][1] = 23; mask[0][3] = 72; } else if (!strcmp(model,"PowerShot G16")) { mask[0][0] = 0; mask[0][2] = 80; mask[0][1] = 0; mask[0][3] = 16; top_margin = 29; left_margin = 120; width = raw_width-left_margin-48; height = raw_height-top_margin-14; } else if (!strcmp(model,"PowerShot SX50 HS")) { top_margin = 17; } else if (!strcmp(model,"EOS D2000C")) { filters = 0x61616161; black = curve[200]; } else if (!strcmp(model,"D1")) { cam_mul[0] *= 256/527.0; cam_mul[2] *= 256/317.0; } else if (!strcmp(model,"D1X")) { width -= 4; pixel_aspect = 0.5; } else if (!strcmp(model,"D40X") || !strcmp(model,"D60") || !strcmp(model,"D80") || !strcmp(model,"D3000")) { height -= 3; width -= 4; } else if (!strcmp(model,"D3") || !strcmp(model,"D3S") || !strcmp(model,"D700")) { width -= 4; left_margin = 2; } else if (!strcmp(model,"D3100")) { width -= 28; left_margin = 6; } else if (!strcmp(model,"D5000") || !strcmp(model,"D90")) { width -= 42; } else if (!strcmp(model,"D5100") || !strcmp(model,"D7000") || !strcmp(model,"COOLPIX A")) { width -= 44; } else if (!strcmp(model,"D3200") || !strncmp(model,"D6",2) || !strncmp(model,"D800",4)) { width -= 46; } else if (!strcmp(model,"D4") || !strcmp(model,"Df")) { width -= 52; left_margin = 2; } else if (!strncmp(model,"D40",3) || !strncmp(model,"D50",3) || !strncmp(model,"D70",3)) { width--; } else if (!strcmp(model,"D100")) { if (load_flags) raw_width = (width += 3) + 3; } else if (!strcmp(model,"D200")) { left_margin = 1; width -= 4; filters = 0x94949494; } else if (!strncmp(model,"D2H",3)) { left_margin = 6; width -= 14; } else if (!strncmp(model,"D2X",3)) { if (width == 3264) width -= 32; else width -= 8; } else if (!strncmp(model,"D300",4)) { width -= 32; } else if (!strcmp(make,"Nikon") && raw_width == 4032) 
{ if(!strcmp(model,"COOLPIX P7700")) { adobe_coeff ("Nikon","COOLPIX P7700"); maximum = 65504; load_flags = 0; } else if(!strcmp(model,"COOLPIX P7800")) { adobe_coeff ("Nikon","COOLPIX P7800"); maximum = 65504; load_flags = 0; } else if(!strcmp(model,"COOLPIX P340")) load_flags=0; } else if (!strncmp(model,"COOLPIX P",9) && raw_width != 4032) { load_flags = 24; filters = 0x94949494; if (model[9] == '7' && iso_speed >= 400) black = 255; } else if (!strncmp(model,"1 ",2)) { height -= 2; } else if (fsize == 1581060) { simple_coeff(3); pre_mul[0] = 1.2085; pre_mul[1] = 1.0943; pre_mul[3] = 1.1103; } else if (fsize == 3178560) { cam_mul[0] *= 4; cam_mul[2] *= 4; } else if (fsize == 4771840) { if (!timestamp && nikon_e995()) strcpy (model, "E995"); if (strcmp(model,"E995")) { filters = 0xb4b4b4b4; simple_coeff(3); pre_mul[0] = 1.196; pre_mul[1] = 1.246; pre_mul[2] = 1.018; } } else if (fsize == 2940928) { if (!timestamp && !nikon_e2100()) strcpy (model,"E2500"); if (!strcmp(model,"E2500")) { height -= 2; load_flags = 6; colors = 4; filters = 0x4b4b4b4b; } } else if (fsize == 4775936) { if (!timestamp) nikon_3700(); if (model[0] == 'E' && atoi(model+1) < 3700) filters = 0x49494949; if (!strcmp(model,"Optio 33WR")) { flip = 1; filters = 0x16161616; } if (make[0] == 'O') { i = find_green (12, 32, 1188864, 3576832); c = find_green (12, 32, 2383920, 2387016); if (abs(i) < abs(c)) { SWAP(i,c); load_flags = 24; } if (i < 0) filters = 0x61616161; } } else if (fsize == 5869568) { if (!timestamp && minolta_z2()) { strcpy (make, "Minolta"); strcpy (model,"DiMAGE Z2"); } load_flags = 6 + 24*(make[0] == 'M'); } else if (fsize == 6291456) { fseek (ifp, 0x300000, SEEK_SET); if ((order = guess_byte_order(0x10000)) == 0x4d4d) { height -= (top_margin = 16); width -= (left_margin = 28); maximum = 0xf5c0; strcpy (make, "ISG"); model[0] = 0; } } else if (!strcmp(make,"Fujifilm")) { if (!strcmp(model+7,"S2Pro")) { strcpy (model,"S2Pro"); height = 2144; width = 2880; flip = 6; } else if 
(load_raw != &CLASS packed_load_raw) maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00; top_margin = (raw_height - height) >> 2 << 1; left_margin = (raw_width - width ) >> 2 << 1; if (width == 2848 || width == 3664) filters = 0x16161616; if (width == 4032 || width == 4952) left_margin = 0; if (width == 3328 && (width -= 66)) left_margin = 34; if (width == 4936) left_margin = 4; if (!strcmp(model,"HS50EXR") || !strcmp(model,"F900EXR")) { width += 2; left_margin = 0; filters = 0x16161616; } if(!strcmp(model,"S5500")) { height -= (top_margin=6); } if (fuji_layout) raw_width *= is_raw; if (filters == 9) FORC(36) xtrans[0][c] = xtrans_abs[(c/6+top_margin) % 6][(c+left_margin) % 6]; } else if (!strcmp(model,"KD-400Z")) { height = 1712; width = 2312; raw_width = 2336; goto konica_400z; } else if (!strcmp(model,"KD-510Z")) { goto konica_510z; } else if (!strcasecmp(make,"Minolta")) { if (!load_raw && (maximum = 0xfff)) load_raw = &CLASS unpacked_load_raw; if (!strncmp(model,"DiMAGE A",8)) { if (!strcmp(model,"DiMAGE A200")) filters = 0x49494949; tiff_bps = 12; load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"ALPHA",5) || !strncmp(model,"DYNAX",5) || !strncmp(model,"MAXXUM",6)) { sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M')); adobe_coeff (make, model+20); load_raw = &CLASS packed_load_raw; } else if (!strncmp(model,"DiMAGE G",8)) { if (model[8] == '4') { height = 1716; width = 2304; } else if (model[8] == '5') { konica_510z: height = 1956; width = 2607; raw_width = 2624; } else if (model[8] == '6') { height = 2136; width = 2848; } data_offset += 14; filters = 0x61616161; konica_400z: load_raw = &CLASS unpacked_load_raw; maximum = 0x3df; order = 0x4d4d; } } else if (!strcmp(model,"*ist D")) { load_raw = &CLASS unpacked_load_raw; data_error = -1; } else if (!strcmp(model,"*ist DS")) { height -= 2; } else if (!strcmp(make,"Samsung") && raw_width == 4704) { height -= top_margin = 8; width -= 2 * (left_margin = 8); load_flags = 32; } else if 
(!strcmp(make,"Samsung") && !strcmp(model,"NX3000")) { top_margin = 24; left_margin = 64; width = 5472; height = 3648; filters = 0x61616161; colors = 3; } else if (!strcmp(make,"Samsung") && raw_height == 3714) { height -= top_margin = 18; left_margin = raw_width - (width = 5536); if (raw_width != 5600) left_margin = top_margin = 0; filters = 0x61616161; colors = 3; } else if (!strcmp(make,"Samsung") && raw_width == 5632) { order = 0x4949; height = 3694; top_margin = 2; width = 5574 - (left_margin = 32 + tiff_bps); if (tiff_bps == 12) load_flags = 80; } else if (!strcmp(make,"Samsung") && raw_width == 5664) { height -= top_margin = 17; left_margin = 96; width = 5544; filters = 0x49494949; } else if (!strcmp(make,"Samsung") && raw_width == 6496) { filters = 0x61616161; } else if (!strcmp(model,"EX1")) { order = 0x4949; height -= 20; top_margin = 2; if ((width -= 6) > 3682) { height -= 10; width -= 46; top_margin = 8; } } else if (!strcmp(model,"WB2000")) { order = 0x4949; height -= 3; top_margin = 2; if ((width -= 10) > 3718) { height -= 28; width -= 56; top_margin = 8; } } else if (strstr(model,"WB550")) { strcpy (model, "WB550"); } else if (!strcmp(model,"EX2F")) { height = 3045; width = 4070; top_margin = 3; order = 0x4949; filters = 0x49494949; load_raw = &CLASS unpacked_load_raw; } else if (!strcmp(model,"STV680 VGA")) { black = 16; } else if (!strcmp(model,"N95")) { height = raw_height - (top_margin = 2); } else if (!strcmp(model,"640x480")) { gamma_curve (0.45, 4.5, 1, 255); } else if (!strcmp(make,"Hasselblad")) { if (load_raw == &CLASS lossless_jpeg_load_raw) load_raw = &CLASS hasselblad_load_raw; if (raw_width == 7262) { height = 5444; width = 7248; top_margin = 4; left_margin = 7; filters = 0x61616161; if(!strcasecmp(model,"H3D")) { adobe_coeff("Hasselblad","H3DII-39"); strcpy(model,"H3DII-39"); } } else if (raw_width == 7410 || raw_width == 8282) { height -= 84; width -= 82; top_margin = 4; left_margin = 41; filters = 0x61616161; 
adobe_coeff("Hasselblad","H4D-40"); strcpy(model,"H4D-40"); } else if (raw_width == 9044) { if(black > 500) { top_margin = 12; left_margin = 44; width = 8956; height = 6708; memset(cblack,0,sizeof(cblack)); adobe_coeff("Hasselblad","H4D-60"); strcpy(model,"H4D-60"); black = 512; } else { height = 6716; width = 8964; top_margin = 8; left_margin = 40; black += load_flags = 256; maximum = 0x8101; strcpy(model,"H3DII-60"); } } else if (raw_width == 4090) { strcpy (model, "V96C"); height -= (top_margin = 6); width -= (left_margin = 3) + 7; filters = 0x61616161; } else if (raw_width == 8282 && raw_height == 6240) { if(!strcasecmp(model,"H5D")) { /* H5D 50*/ left_margin = 54; top_margin = 16; width = 8176; height = 6132; black = 256; strcpy(model,"H5D-50"); } else if(!strcasecmp(model,"H3D")) { black=0; left_margin = 54; top_margin = 16; width = 8176; height = 6132; memset(cblack,0,sizeof(cblack)); adobe_coeff("Hasselblad","H3D-50"); strcpy(model,"H3D-50"); } } else if (raw_width == 8374 && raw_height == 6304) { /* H5D 50c*/ left_margin = 52; top_margin = 100; width = 8272; height = 6200; black = 256; strcpy(model,"H5D-50c"); } if (tiff_samples > 1) { is_raw = tiff_samples+1; if (!shot_select && !half_size) filters = 0; } } else if (!strcmp(make,"Sinar")) { if (!load_raw) load_raw = &CLASS unpacked_load_raw; if (is_raw > 1 && !shot_select && !half_size) filters = 0; maximum = 0x3fff; } else if (!strcmp(make,"Leaf")) { maximum = 0x3fff; fseek (ifp, data_offset, SEEK_SET); if (ljpeg_start (&jh, 1) && jh.bits == 15) maximum = 0x1fff; if (tiff_samples > 1) filters = 0; if (tiff_samples > 1 || tile_length < raw_height) { load_raw = &CLASS leaf_hdr_load_raw; raw_width = tile_width; } if ((width | height) == 2048) { if (tiff_samples == 1) { filters = 1; strcpy (cdesc, "RBTG"); strcpy (model, "CatchLight"); top_margin = 8; left_margin = 18; height = 2032; width = 2016; } else { strcpy (model, "DCB2"); top_margin = 10; left_margin = 16; height = 2028; width = 2022; } } else if 
(width+height == 3144+2060) { if (!model[0]) strcpy (model, "Cantare"); if (width > height) { top_margin = 6; left_margin = 32; height = 2048; width = 3072; filters = 0x61616161; } else { left_margin = 6; top_margin = 32; width = 2048; height = 3072; filters = 0x16161616; } if (!cam_mul[0] || model[0] == 'V') filters = 0; else is_raw = tiff_samples; } else if (width == 2116) { strcpy (model, "Valeo 6"); height -= 2 * (top_margin = 30); width -= 2 * (left_margin = 55); filters = 0x49494949; } else if (width == 3171) { strcpy (model, "Valeo 6"); height -= 2 * (top_margin = 24); width -= 2 * (left_margin = 24); filters = 0x16161616; } } else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) { if ((flen - data_offset) / (raw_width*8/7) == raw_height) load_raw = &CLASS panasonic_load_raw; if (!load_raw) { load_raw = &CLASS unpacked_load_raw; load_flags = 4; } zero_is_bad = 1; if ((height += 12) > raw_height) height = raw_height; for (i=0; i < sizeof pana / sizeof *pana; i++) if (raw_width == pana[i][0] && raw_height == pana[i][1]) { left_margin = pana[i][2]; top_margin = pana[i][3]; width += pana[i][4]; height += pana[i][5]; } filters = 0x01010101 * (uchar) "\x94\x61\x49\x16" [((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3]; } else if (!strcmp(model,"C770UZ")) { height = 1718; width = 2304; filters = 0x16161616; load_raw = &CLASS packed_load_raw; load_flags = 30; } else if (!strcmp(make,"Olympus")) { height += height & 1; if (exif_cfa) filters = exif_cfa; if (width == 4100) width -= 4; if (width == 4080) width -= 24; if (width == 9280) { width -= 6; height -= 6; } if (load_raw == &CLASS unpacked_load_raw) load_flags = 4; tiff_bps = 12; if (!strcmp(model,"E-300") || !strcmp(model,"E-500")) { width -= 20; if (load_raw == &CLASS unpacked_load_raw) { maximum = 0xfc3; memset (cblack, 0, sizeof cblack); } } else if (!strcmp(model,"STYLUS1")) { width -= 14; maximum = 0xfff; } else if (!strcmp(model,"E-330")) { width -= 30; if (load_raw == &CLASS 
unpacked_load_raw) maximum = 0xf79; } else if (!strcmp(model,"SP550UZ")) { thumb_length = flen - (thumb_offset = 0xa39800); thumb_height = 480; thumb_width = 640; } } else if (!strcmp(model,"N Digital")) { height = 2047; width = 3072; filters = 0x61616161; data_offset = 0x1a00; load_raw = &CLASS packed_load_raw; } else if (!strcmp(model,"DSC-F828")) { width = 3288; left_margin = 5; mask[1][3] = -17; data_offset = 862144; load_raw = &CLASS sony_load_raw; filters = 0x9c9c9c9c; colors = 4; strcpy (cdesc, "RGBE"); } else if (!strcmp(model,"DSC-V3")) { width = 3109; left_margin = 59; mask[0][1] = 9; data_offset = 787392; load_raw = &CLASS sony_load_raw; } else if (!strcmp(make,"Sony") && raw_width == 3984) { width = 3925; order = 0x4d4d; } else if (!strcmp(make,"Sony") && raw_width == 4288) { width -= 32; } else if (!strcmp(make,"Sony") && raw_width == 4928) { if (height < 3280) width -= 8; } else if (!strcmp(make,"Sony") && raw_width == 5504) { // ILCE-3000//5000 width -= height > 3664 ? 8 : 32; } else if (!strcmp(make,"Sony") && raw_width == 6048) { width -= 24; if (strstr(model,"RX1") || strstr(model,"A99")) width -= 6; } else if (!strcmp(make,"Sony") && raw_width == 7392) { width -= 30; } else if (!strcmp(model,"DSLR-A100")) { if (width == 3880) { height--; width = ++raw_width; } else { height -= 4; width -= 4; order = 0x4d4d; load_flags = 2; } filters = 0x61616161; } else if (!strcmp(model,"DSLR-A350")) { height -= 4; } else if (!strcmp(model,"PIXL")) { height -= top_margin = 4; width -= left_margin = 32; gamma_curve (0, 7, 1, 255); } else if (!strcmp(model,"C603") || !strcmp(model,"C330") || !strcmp(model,"12MP")) { order = 0x4949; if (filters && data_offset) { fseek (ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET); read_shorts (curve, 256); } else gamma_curve (0, 3.875, 1, 255); load_raw = filters ? &CLASS eight_bit_load_raw : strcmp(model,"C330") ? 
&CLASS kodak_c603_load_raw : &CLASS kodak_c330_load_raw; load_flags = tiff_bps > 16; tiff_bps = 8; } else if (!strncasecmp(model,"EasyShare",9)) { data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000; load_raw = &CLASS packed_load_raw; } else if (!strcasecmp(make,"Kodak")) { if (filters == UINT_MAX) filters = 0x61616161; if (!strncmp(model,"NC2000",6) || !strncmp(model,"EOSDCS",6) || !strncmp(model,"DCS4",4)) { width -= 4; left_margin = 2; if (model[6] == ' ') model[6] = 0; if (!strcmp(model,"DCS460A")) goto bw; } else if (!strcmp(model,"DCS660M")) { black = 214; goto bw; } else if (!strcmp(model,"DCS760M")) { bw: colors = 1; filters = 0; } if (!strcmp(model+4,"20X")) strcpy (cdesc, "MYCY"); if (strstr(model,"DC25")) { strcpy (model, "DC25"); data_offset = 15424; } if (!strncmp(model,"DC2",3)) { raw_height = 2 + (height = 242); if (!strncmp(model, "DC290", 5)) iso_speed = 100; if (!strncmp(model, "DC280", 5)) iso_speed = 70; if (flen < 100000) { raw_width = 256; width = 249; pixel_aspect = (4.0*height) / (3.0*width); } else { raw_width = 512; width = 501; pixel_aspect = (493.0*height) / (373.0*width); } top_margin = left_margin = 1; colors = 4; filters = 0x8d8d8d8d; simple_coeff(1); pre_mul[1] = 1.179; pre_mul[2] = 1.209; pre_mul[3] = 1.036; load_raw = &CLASS eight_bit_load_raw; } else if (!strcmp(model,"40")) { strcpy (model, "DC40"); height = 512; width = 768; data_offset = 1152; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC50")) { strcpy (model, "DC50"); height = 512; width = 768; iso_speed=84; data_offset = 19712; load_raw = &CLASS kodak_radc_load_raw; } else if (strstr(model,"DC120")) { strcpy (model, "DC120"); height = 976; width = 848; iso_speed=160; pixel_aspect = height/0.75/width; load_raw = tiff_compress == 7 ? 
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw; } else if (!strcmp(model,"DCS200")) { thumb_height = 128; thumb_width = 192; thumb_offset = 6144; thumb_misc = 360; iso_speed=140; write_thumb = &CLASS layer_thumb; black = 17; } } else if (!strcmp(model,"Fotoman Pixtura")) { height = 512; width = 768; data_offset = 3632; load_raw = &CLASS kodak_radc_load_raw; filters = 0x61616161; simple_coeff(2); } else if (!strncmp(model,"QuickTake",9)) { if (head[5]) strcpy (model+10, "200"); fseek (ifp, 544, SEEK_SET); height = get2(); width = get2(); data_offset = (get4(),get2()) == 30 ? 738:736; if (height > width) { SWAP(height,width); fseek (ifp, data_offset-6, SEEK_SET); flip = ~get2() & 3 ? 5:6; } filters = 0x61616161; } else if (!strcmp(make,"Rollei") && !load_raw) { switch (raw_width) { case 1316: height = 1030; width = 1300; top_margin = 1; left_margin = 6; break; case 2568: height = 1960; width = 2560; top_margin = 2; left_margin = 8; } filters = 0x16161616; load_raw = &CLASS rollei_load_raw; } else if (!strcmp(model,"GRAS-50S5C")) { height = 2048; width = 2440; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x49494949; order = 0x4949; maximum = 0xfffC; } else if (!strcmp(model,"BB-500CL")) { height = 2058; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"BB-500GE")) { height = 2058; width = 2456; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model,"SVS625CL")) { height = 2050; width = 2448; load_raw = &CLASS unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x0fff; } /* Early reject for damaged images */ if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1) { is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2); #endif return; } if (!model[0]) 
sprintf (model, "%dx%d", width, height); if (filters == UINT_MAX) filters = 0x94949494; if (thumb_offset && !thumb_height) { fseek (ifp, thumb_offset, SEEK_SET); if (ljpeg_start (&jh, 1)) { thumb_width = jh.wide; thumb_height = jh.high; } } dng_skip: if ((use_camera_matrix & (use_camera_wb || dng_version)) && cmatrix[0][0] > 0.125) { memcpy (rgb_cam, cmatrix, sizeof cmatrix); raw_color = 0; } if (raw_color) adobe_coeff (make, model); #ifdef LIBRAW_LIBRARY_BUILD else if(imgdata.color.cam_xyz[0][0]<0.01) adobe_coeff (make, model,1); #endif if (load_raw == &CLASS kodak_radc_load_raw) if (raw_color) adobe_coeff ("Apple","Quicktake"); if (fuji_width) { fuji_width = width >> !fuji_layout; if (~fuji_width & 1) filters = 0x49494949; width = (height >> fuji_layout) + fuji_width; height = width - 1; pixel_aspect = 1; } else { if (raw_height < height) raw_height = height; if (raw_width < width ) raw_width = width; } if (!tiff_bps) tiff_bps = 12; if (!maximum) { maximum = (1 << tiff_bps) - 1; if(maximum < 0x10000 && curve[maximum]>0 && load_raw == &CLASS sony_arw2_load_raw) maximum = curve[maximum]; } if (!load_raw || height < 22 || width < 22 || tiff_bps > 16 || tiff_samples > 6 || colors > 4) is_raw = 0; #ifdef NO_JASPER if (load_raw == &CLASS redcine_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjasper"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER; #endif } #endif #ifdef NO_JPEG if (load_raw == &CLASS kodak_jpeg_load_raw || load_raw == &CLASS lossy_dng_load_raw) { #ifdef DCRAW_VERBOSE fprintf (stderr,_("%s: You must link dcraw with %s!!\n"), ifname, "libjpeg"); #endif is_raw = 0; #ifdef LIBRAW_LIBRARY_BUILD imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB; #endif } #endif if (!cdesc[0]) strcpy (cdesc, colors == 3 ? 
"RGBG":"GMCY");
  /* Tail of identify(): final fallbacks for raw geometry. */
  if (!raw_height) raw_height = height;
  if (!raw_width ) raw_width = width;
  /* For a 3-color Bayer CFA, mark each green that shares a row with blue
     as a distinct fourth color by OR-ing in its horizontal neighbors'
     color bits (filters is the 2-bit-per-cell CFA pattern). */
  if (filters > 999 && colors == 3)
    filters |= ((filters >> 2 & 0x22222222) |
		(filters << 2 & 0x88888888)) & filters << 1;
notraw:
  /* Non-raw files jump here; resolve orientation from TIFF data if unset. */
  if (flip == UINT_MAX) flip = tiff_flip;
  if (flip == UINT_MAX) flip = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
//@end COMMON
//@out FILEIO
#ifndef NO_LCMS
/*
   Apply an ICC color profile to the interpolated image using Little CMS.

   input  - path of the input profile, or "embed" to use the profile that
            was found in the raw file (profile_length bytes; read from the
            file at profile_offset, or from imgdata.color.profile when
            built as part of LibRaw).
   output - path of the output profile; NULL selects a built-in sRGB
            profile via cmsCreate_sRGBProfile().

   On success the transform is applied in place to image[] as 16-bit RGBA
   (TYPE_RGBA_16, width*height pixels) and raw_color is set so the camera
   matrix rgb_cam is not applied again later.  The output profile bytes are
   kept in the global oprof (so they can be embedded in the output file);
   ownership stays with this module.
 */
void CLASS apply_profile (const char *input, const char *output)
{
  char *prof;
  cmsHPROFILE hInProfile=0, hOutProfile=0;
  cmsHTRANSFORM hTransform;
  FILE *fp;
  unsigned size;

  if (strcmp (input, "embed"))
    hInProfile = cmsOpenProfileFromFile (input, "r");
  else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
    /* Standalone dcraw: re-read the embedded profile from the raw file. */
    prof = (char *) malloc (profile_length);
    merror (prof, "apply_profile()");
    fseek (ifp, profile_offset, SEEK_SET);
    fread (prof, 1, profile_length, ifp);
    hInProfile = cmsOpenProfileFromMem (prof, profile_length);
    free (prof);
#else
    /* LibRaw build: the embedded profile was already copied to imgdata. */
    hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
  } else {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
  }
  if (!hInProfile) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
    return;
  }
  if (!output)
    hOutProfile = cmsCreate_sRGBProfile();
  else if ((fp = fopen (output, "rb"))) {
    /* The profile's total size is its first 4 bytes, big-endian (ntohl).
       NOTE(review): fread/fopen results here are unchecked; a truncated
       or hostile profile file yields a short read of oprof. */
    fread (&size, 4, 1, fp);
    fseek (fp, 0, SEEK_SET);
    oprof = (unsigned *) malloc (size = ntohl(size));
    merror (oprof, "apply_profile()");
    fread (oprof, 1, size, fp);
    fclose (fp);
    if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
      free (oprof);
      oprof = 0;
    }
  }
#ifdef DCRAW_VERBOSE
  else fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
  if (!hOutProfile) {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
    goto quit;
  }
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
  hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
	hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
  /* In-place transform of the whole image buffer. */
  cmsDoTransform (hTransform, image, image, width*height);
  raw_color = 1;		/* Don't use rgb_cam with a profile */
  cmsDeleteTransform (hTransform);
  cmsCloseProfile (hOutProfile);
quit:
  cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
//@end FILEIO
//@out COMMON
/* Head of convert_to_rgb(): convert the camera color space to the selected
   output space (body continues beyond this chunk). */
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
  int row, col, c;
#endif
  int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
  ushort *img;
  float out[3];
#endif
  float out_cam[3][4];
  double num, inverse[3][3];
  /* XYZ(D50) -> sRGB primaries. */
  static const double xyzd50_srgb[3][3] =
  { { 0.436083, 0.385083, 0.143055 },
    { 0.222507, 0.716888, 0.060608 },
    { 0.013930, 0.097097, 0.714022 } };
  static const double rgb_rgb[3][3] =
  { { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
  static const double adobe_rgb[3][3] =
  { { 0.715146, 0.284856, 0.000000 },
    { 0.000000, 1.000000, 0.000000 },
    { 0.000000, 0.041166, 0.958839 } };
  static const double wide_rgb[3][3] =
  { { 0.593087, 0.404710, 0.002206 },
    { 0.095413, 0.843149, 0.061439 },
    { 0.011621, 0.069091, 0.919288 } };
  static const double prophoto_rgb[3][3] =
  { { 0.529317, 0.330092, 0.140588 },
    { 0.098368, 0.873465, 0.028169 },
    { 0.016879, 0.117663, 0.865457 } };
  /* Output-space table, indexed by the user's output_color choice. */
  static const double (*out_rgb[])[3] =
  { rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb };
  static const char *name[] =
  { "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" };
  /* Template ICC profile header (presumably 'acsp' signature and D50
     white point -- values are raw big-endian words; verify against the
     ICC spec before editing). */
  static const unsigned phead[] =
  { 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
    0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
  /* ICC tag table: tag signature, offset (filled in later), size. */
  unsigned pbody[] =
  { 10, 0x63707274, 0, 36,	/* cprt */
	0x64657363, 0, 40,	/* desc */
	0x77747074, 0, 20,	/* wtpt */
	0x626b7074, 0, 20,	/* bkpt */
	0x72545243, 0, 14,	/* rTRC */
	0x67545243, 0, 14,	/* gTRC */
	0x62545243, 0, 14,	/* 
bTRC */ 0x7258595a, 0, 20, /* rXYZ */ 0x6758595a, 0, 20, /* gXYZ */ 0x6258595a, 0, 20 }; /* bXYZ */ static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc }; unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 }; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2); #endif gamma_curve (gamm[0], gamm[1], 0, 0); memcpy (out_cam, rgb_cam, sizeof out_cam); #ifndef LIBRAW_LIBRARY_BUILD raw_color |= colors == 1 || document_mode || output_color < 1 || output_color > 5; #else raw_color |= colors == 1 || output_color < 1 || output_color > 5; #endif if (!raw_color) { oprof = (unsigned *) calloc (phead[0], 1); merror (oprof, "convert_to_rgb()"); memcpy (oprof, phead, sizeof phead); if (output_color == 5) oprof[4] = oprof[5]; oprof[0] = 132 + 12*pbody[0]; for (i=0; i < pbody[0]; i++) { oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874; pbody[i*3+2] = oprof[0]; oprof[0] += (pbody[i*3+3] + 3) & -4; } memcpy (oprof+32, pbody, sizeof pbody); oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1; memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite); pcurve[3] = (short)(256/gamm[5]+0.5) << 16; for (i=4; i < 7; i++) memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve); pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3); for (i=0; i < 3; i++) for (j=0; j < 3; j++) { for (num = k=0; k < 3; k++) num += xyzd50_srgb[i][k] * inverse[j][k]; oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5; } for (i=0; i < phead[0]/4; i++) oprof[i] = htonl(oprof[i]); strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw"); strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]); for (i=0; i < 3; i++) for (j=0; j < colors; j++) for (out_cam[i][j] = k=0; k < 3; k++) out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j]; } #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr, raw_color ? 
_("Building histograms...\n") : _("Converting to %s colorspace...\n"), name[output_color-1]); #endif #ifdef LIBRAW_LIBRARY_BUILD convert_to_rgb_loop(out_cam); #else memset (histogram, 0, sizeof histogram); for (img=image[0], row=0; row < height; row++) for (col=0; col < width; col++, img+=4) { if (!raw_color) { out[0] = out[1] = out[2] = 0; FORCC { out[0] += out_cam[0][c] * img[c]; out[1] += out_cam[1][c] * img[c]; out[2] += out_cam[2][c] * img[c]; } FORC3 img[c] = CLIP((int) out[c]); } else if (document_mode) img[0] = img[fcol(row,col)]; FORCC histogram[c][img[c] >> 3]++; } #endif if (colors == 4 && output_color) colors = 3; #ifndef LIBRAW_LIBRARY_BUILD if (document_mode && filters) colors = 1; #endif #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2); #endif } void CLASS fuji_rotate() { int i, row, col; double step; float r, c, fr, fc; unsigned ur, uc; ushort wide, high, (*img)[4], (*pix)[4]; if (!fuji_width) return; #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Rotating image 45 degrees...\n")); #endif fuji_width = (fuji_width - 1 + shrink) >> shrink; step = sqrt(0.5); wide = fuji_width / step; high = (height - fuji_width) / step; img = (ushort (*)[4]) calloc (high, wide*sizeof *img); merror (img, "fuji_rotate()"); #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2); #endif for (row=0; row < high; row++) for (col=0; col < wide; col++) { ur = r = fuji_width + (row-col)*step; uc = c = (row+col)*step; if (ur > height-2 || uc > width-2) continue; fr = r - ur; fc = c - uc; pix = image + ur*width + uc; for (i=0; i < colors; i++) img[row*wide+col][i] = (pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) + (pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr; } free (image); width = wide; height = high; image = img; fuji_width = 0; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2); #endif } void CLASS stretch() { ushort newdim, (*img)[4], *pix0, *pix1; int row, col, c; double rc, frac; if (pixel_aspect == 
1) return; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2); #endif #ifdef DCRAW_VERBOSE if (verbose) fprintf (stderr,_("Stretching the image...\n")); #endif if (pixel_aspect < 1) { newdim = height / pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (width, newdim*sizeof *img); merror (img, "stretch()"); for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c*width]; if (c+1 < height) pix1 += width*4; for (col=0; col < width; col++, pix0+=4, pix1+=4) FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } height = newdim; } else { newdim = width * pixel_aspect + 0.5; img = (ushort (*)[4]) calloc (height, newdim*sizeof *img); merror (img, "stretch()"); for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) { frac = rc - (c = rc); pix0 = pix1 = image[c]; if (c+1 < width) pix1 += 4; for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4) FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5; } width = newdim; } free (image); image = img; #ifdef LIBRAW_LIBRARY_BUILD RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2); #endif } int CLASS flip_index (int row, int col) { if (flip & 4) SWAP(row,col); if (flip & 2) row = iheight - 1 - row; if (flip & 1) col = iwidth - 1 - col; return row * iwidth + col; } //@end COMMON struct tiff_tag { ushort tag, type; int count; union { char c[4]; short s[2]; int i; } val; }; struct tiff_hdr { ushort t_order, magic; int ifd; ushort pad, ntag; struct tiff_tag tag[23]; int nextifd; ushort pad2, nexif; struct tiff_tag exif[4]; ushort pad3, ngps; struct tiff_tag gpst[10]; short bps[4]; int rat[10]; unsigned gps[26]; char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64]; }; //@out COMMON void CLASS tiff_set (ushort *ntag, ushort tag, ushort type, int count, int val) { struct tiff_tag *tt; int c; tt = (struct tiff_tag *)(ntag+1) + (*ntag)++; tt->tag = tag; tt->type = type; tt->count = count; if (type < 3 && count <= 4) FORC(4) 
tt->val.c[c] = val >> (c << 3); else if (type == 3 && count <= 2) FORC(2) tt->val.s[c] = val >> (c << 4); else tt->val.i = val; } #define TOFF(ptr) ((char *)(&(ptr)) - (char *)th) void CLASS tiff_head (struct tiff_hdr *th, int full) { int c, psize=0; struct tm *t; memset (th, 0, sizeof *th); th->t_order = htonl(0x4d4d4949) >> 16; th->magic = 42; th->ifd = 10; if (full) { tiff_set (&th->ntag, 254, 4, 1, 0); tiff_set (&th->ntag, 256, 4, 1, width); tiff_set (&th->ntag, 257, 4, 1, height); tiff_set (&th->ntag, 258, 3, colors, output_bps); if (colors > 2) th->tag[th->ntag-1].val.i = TOFF(th->bps); FORC4 th->bps[c] = output_bps; tiff_set (&th->ntag, 259, 3, 1, 1); tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1)); } tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc)); tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make)); tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model)); if (full) { if (oprof) psize = ntohl(oprof[0]); tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize); tiff_set (&th->ntag, 277, 3, 1, colors); tiff_set (&th->ntag, 278, 4, 1, height); tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8); } else tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0'); tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0])); tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2])); tiff_set (&th->ntag, 284, 3, 1, 1); tiff_set (&th->ntag, 296, 3, 1, 2); tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft)); tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date)); tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist)); tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif)); if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th); tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4])); tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6])); tiff_set (&th->nexif, 34855, 3, 1, iso_speed); tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8])); if (gpsdata[1]) { tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps)); tiff_set (&th->ngps, 0, 1, 4, 0x202); tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]); 
tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0])); tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]); tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6])); tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]); tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18])); tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12])); tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20])); tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23])); memcpy (th->gps, gpsdata, sizeof th->gps); } th->rat[0] = th->rat[2] = 300; th->rat[1] = th->rat[3] = 1; FORC(6) th->rat[4+c] = 1000000; th->rat[4] *= shutter; th->rat[6] *= aperture; th->rat[8] *= focal_len; strncpy (th->t_desc, desc, 512); strncpy (th->t_make, make, 64); strncpy (th->t_model, model, 64); strcpy (th->soft, "dcraw v" DCRAW_VERSION); t = localtime (&timestamp); sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d", t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec); strncpy (th->t_artist, artist, 64); } #ifdef LIBRAW_LIBRARY_BUILD void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length) { ushort exif[5]; struct tiff_hdr th; fputc (0xff, tfp); fputc (0xd8, tfp); if (strcmp (t_humb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, tfp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, tfp); } fwrite (t_humb+2, 1, t_humb_length-2, tfp); } void CLASS jpeg_thumb() { char *thumb; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); jpeg_thumb_writer(ofp,thumb,thumb_length); free (thumb); } #else void CLASS jpeg_thumb() { char *thumb; ushort exif[5]; struct tiff_hdr th; thumb = (char *) malloc (thumb_length); merror (thumb, "jpeg_thumb()"); fread (thumb, 1, thumb_length, ifp); fputc (0xff, ofp); fputc (0xd8, ofp); if (strcmp (thumb+6, "Exif")) { memcpy (exif, "\xff\xe1 Exif\0\0", 10); exif[1] = htons (8 + sizeof th); fwrite (exif, 1, sizeof exif, ofp); tiff_head (&th, 0); fwrite (&th, 1, sizeof th, ofp); } fwrite (thumb+2, 
1, thumb_length-2, ofp); free (thumb); } #endif void CLASS write_ppm_tiff() { struct tiff_hdr th; uchar *ppm; ushort *ppm2; int c, row, col, soff, rstep, cstep; int perc, val, total, t_white=0x2000; #ifdef LIBRAW_LIBRARY_BUILD perc = width * height * auto_bright_thr; #else perc = width * height * 0.01; /* 99th percentile white level */ #endif if (fuji_width) perc /= 2; if (!((highlight & ~2) || no_auto_bright)) for (t_white=c=0; c < colors; c++) { for (val=0x2000, total=0; --val > 32; ) if ((total += histogram[c][val]) > perc) break; if (t_white < val) t_white = val; } gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright); iheight = height; iwidth = width; if (flip & 4) SWAP(height,width); ppm = (uchar *) calloc (width, colors*output_bps/8); ppm2 = (ushort *) ppm; merror (ppm, "write_ppm_tiff()"); if (output_tiff) { tiff_head (&th, 1); fwrite (&th, sizeof th, 1, ofp); if (oprof) fwrite (oprof, ntohl(oprof[0]), 1, ofp); } else if (colors > 3) fprintf (ofp, "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n", width, height, colors, (1 << output_bps)-1, cdesc); else fprintf (ofp, "P%d\n%d %d\n%d\n", colors/2+5, width, height, (1 << output_bps)-1); soff = flip_index (0, 0); cstep = flip_index (0, 1) - soff; rstep = flip_index (1, 0) - flip_index (0, width); for (row=0; row < height; row++, soff += rstep) { for (col=0; col < width; col++, soff += cstep) if (output_bps == 8) FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8; else FORCC ppm2[col*colors+c] = curve[image[soff][c]]; if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa) swab ((char*)ppm2, (char*)ppm2, width*colors*2); fwrite (ppm, colors*output_bps/8, width, ofp); } free (ppm); } //@end COMMON int CLASS main (int argc, const char **argv) { int arg, status=0, quality, i, c; int timestamp_only=0, thumbnail_only=0, identify_only=0; int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1; int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0; const char *sp, *bpfile=0, 
*dark_frame=0, *write_ext; char opm, opt, *ofname, *cp; struct utimbuf ut; #ifndef NO_LCMS const char *cam_profile=0, *out_profile=0; #endif #ifndef LOCALTIME putenv ((char *) "TZ=UTC"); #endif #ifdef LOCALEDIR setlocale (LC_CTYPE, ""); setlocale (LC_MESSAGES, ""); bindtextdomain ("dcraw", LOCALEDIR); textdomain ("dcraw"); #endif if (argc == 1) { printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION); printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n")); printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]); puts(_("-v Print verbose messages")); puts(_("-c Write image data to standard output")); puts(_("-e Extract embedded thumbnail image")); puts(_("-i Identify files without decoding them")); puts(_("-i -v Identify files and show metadata")); puts(_("-z Change file dates to camera timestamp")); puts(_("-w Use camera white balance, if possible")); puts(_("-a Average the whole image for white balance")); puts(_("-A <x y w h> Average a grey box for white balance")); puts(_("-r <r g b g> Set custom white balance")); puts(_("+M/-M Use/don't use an embedded color matrix")); puts(_("-C <r b> Correct chromatic aberration")); puts(_("-P <file> Fix the dead pixels listed in this file")); puts(_("-K <file> Subtract dark frame (16-bit raw PGM)")); puts(_("-k <num> Set the darkness level")); puts(_("-S <num> Set the saturation level")); puts(_("-n <num> Set threshold for wavelet denoising")); puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)")); puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)")); puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)")); #ifndef NO_LCMS puts(_("-o <file> Apply output ICC profile from file")); puts(_("-p <file> Apply camera ICC profile from file or \"embed\"")); #endif puts(_("-d Document mode (no color, no interpolation)")); puts(_("-D Document mode without scaling (totally raw)")); puts(_("-j Don't stretch or rotate raw pixels")); puts(_("-W Don't automatically brighten the 
image")); puts(_("-b <num> Adjust brightness (default = 1.0)")); puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)")); puts(_("-q [0-3] Set the interpolation quality")); puts(_("-h Half-size color image (twice as fast as \"-q 0\")")); puts(_("-f Interpolate RGGB as four colors")); puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G")); puts(_("-s [0..N-1] Select one raw image or \"all\" from each file")); puts(_("-6 Write 16-bit instead of 8-bit")); puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\"")); puts(_("-T Write TIFF instead of PPM")); puts(""); return 1; } argv[argc] = ""; for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) { opt = argv[arg++][1]; if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt))) for (i=0; i < "114111111422"[cp-sp]-'0'; i++) if (!isdigit(argv[arg+i][0])) { fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt); return 1; } switch (opt) { case 'n': threshold = atof(argv[arg++]); break; case 'b': bright = atof(argv[arg++]); break; case 'r': FORC4 user_mul[c] = atof(argv[arg++]); break; case 'C': aber[0] = 1 / atof(argv[arg++]); aber[2] = 1 / atof(argv[arg++]); break; case 'g': gamm[0] = atof(argv[arg++]); gamm[1] = atof(argv[arg++]); if (gamm[0]) gamm[0] = 1/gamm[0]; break; case 'k': user_black = atoi(argv[arg++]); break; case 'S': user_sat = atoi(argv[arg++]); break; case 't': user_flip = atoi(argv[arg++]); break; case 'q': user_qual = atoi(argv[arg++]); break; case 'm': med_passes = atoi(argv[arg++]); break; case 'H': highlight = atoi(argv[arg++]); break; case 's': shot_select = abs(atoi(argv[arg])); multi_out = !strcmp(argv[arg++],"all"); break; case 'o': if (isdigit(argv[arg][0]) && !argv[arg][1]) output_color = atoi(argv[arg++]); #ifndef NO_LCMS else out_profile = argv[arg++]; break; case 'p': cam_profile = argv[arg++]; #endif break; case 'P': bpfile = argv[arg++]; break; case 'K': dark_frame = argv[arg++]; break; case 'z': timestamp_only = 1; break; case 'e': thumbnail_only = 1; break; case 'i': 
identify_only = 1; break; case 'c': write_to_stdout = 1; break; case 'v': verbose = 1; break; case 'h': half_size = 1; break; case 'f': four_color_rgb = 1; break; case 'A': FORC4 greybox[c] = atoi(argv[arg++]); case 'a': use_auto_wb = 1; break; case 'w': use_camera_wb = 1; break; case 'M': use_camera_matrix = 3 * (opm == '+'); break; case 'I': read_from_stdin = 1; break; case 'E': document_mode++; case 'D': document_mode++; case 'd': document_mode++; case 'j': use_fuji_rotate = 0; break; case 'W': no_auto_bright = 1; break; case 'T': output_tiff = 1; break; case '4': gamm[0] = gamm[1] = no_auto_bright = 1; case '6': output_bps = 16; break; default: fprintf (stderr,_("Unknown option \"-%c\".\n"), opt); return 1; } } if (arg == argc) { fprintf (stderr,_("No files to process.\n")); return 1; } if (write_to_stdout) { if (isatty(1)) { fprintf (stderr,_("Will not write an image to the terminal!\n")); return 1; } #if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__) if (setmode(1,O_BINARY) < 0) { perror ("setmode()"); return 1; } #endif } for ( ; arg < argc; arg++) { status = 1; raw_image = 0; image = 0; oprof = 0; meta_data = ofname = 0; ofp = stdout; if (setjmp (failure)) { if (fileno(ifp) > 2) fclose(ifp); if (fileno(ofp) > 2) fclose(ofp); status = 1; goto cleanup; } ifname = argv[arg]; if (!(ifp = fopen (ifname, "rb"))) { perror (ifname); continue; } status = (identify(),!is_raw); if (user_flip >= 0) flip = user_flip; switch ((flip+3600) % 360) { case 270: flip = 5; break; case 180: flip = 3; break; case 90: flip = 6; } if (timestamp_only) { if ((status = !timestamp)) fprintf (stderr,_("%s has no timestamp.\n"), ifname); else if (identify_only) printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname); else { if (verbose) fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp); ut.actime = ut.modtime = timestamp; utime (ifname, &ut); } goto next; } write_fun = &CLASS write_ppm_tiff; if (thumbnail_only) { if ((status = !thumb_offset)) { fprintf 
(stderr,_("%s has no thumbnail.\n"), ifname); goto next; } else if (thumb_load_raw) { load_raw = thumb_load_raw; data_offset = thumb_offset; height = thumb_height; width = thumb_width; filters = 0; colors = 3; } else { fseek (ifp, thumb_offset, SEEK_SET); write_fun = write_thumb; goto thumbnail; } } if (load_raw == &CLASS kodak_ycbcr_load_raw) { height += height & 1; width += width & 1; } if (identify_only && verbose && make[0]) { printf (_("\nFilename: %s\n"), ifname); printf (_("Timestamp: %s"), ctime(&timestamp)); printf (_("Camera: %s %s\n"), make, model); if (artist[0]) printf (_("Owner: %s\n"), artist); if (dng_version) { printf (_("DNG Version: ")); for (i=24; i >= 0; i -= 8) printf ("%d%c", dng_version >> i & 255, i ? '.':'\n'); } printf (_("ISO speed: %d\n"), (int) iso_speed); printf (_("Shutter: ")); if (shutter > 0 && shutter < 1) shutter = (printf ("1/"), 1 / shutter); printf (_("%0.1f sec\n"), shutter); printf (_("Aperture: f/%0.1f\n"), aperture); printf (_("Focal length: %0.1f mm\n"), focal_len); printf (_("Embedded ICC profile: %s\n"), profile_length ? 
_("yes"):_("no")); printf (_("Number of raw images: %d\n"), is_raw); if (pixel_aspect != 1) printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect); if (thumb_offset) printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height); printf (_("Full size: %4d x %d\n"), raw_width, raw_height); } else if (!is_raw) fprintf (stderr,_("Cannot decode file %s\n"), ifname); if (!is_raw) goto next; shrink = filters && (half_size || (!identify_only && (threshold || aber[0] != 1 || aber[2] != 1))); iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (identify_only) { if (verbose) { if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (use_fuji_rotate) { if (fuji_width) { fuji_width = (fuji_width - 1 + shrink) >> shrink; iwidth = fuji_width / sqrt(0.5); iheight = (iheight - fuji_width) / sqrt(0.5); } else { if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5; if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5; } } if (flip & 4) SWAP(iheight,iwidth); printf (_("Image size: %4d x %d\n"), width, height); printf (_("Output size: %4d x %d\n"), iwidth, iheight); printf (_("Raw colors: %d"), colors); if (filters) { int fhigh = 2, fwide = 2; if ((filters ^ (filters >> 8)) & 0xff) fhigh = 4; if ((filters ^ (filters >> 16)) & 0xffff) fhigh = 8; if (filters == 1) fhigh = fwide = 16; if (filters == 9) fhigh = fwide = 6; printf (_("\nFilter pattern: ")); for (i=0; i < fhigh; i++) for (c = i && putchar('/') && 0; c < fwide; c++) putchar (cdesc[fcol(i,c)]); } printf (_("\nDaylight multipliers:")); FORCC printf (" %f", pre_mul[c]); if (cam_mul[0] > 0) { printf (_("\nCamera multipliers:")); FORC4 printf (" %f", cam_mul[c]); } putchar ('\n'); } else printf (_("%s is a %s %s image.\n"), ifname, make, model); next: fclose(ifp); continue; } if (meta_length) { meta_data = (char *) malloc (meta_length); merror (meta_data, 
"main()"); } if (filters || colors == 1) { raw_image = (ushort *) calloc ((raw_height+7), raw_width*2); merror (raw_image, "main()"); } else { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); } if (verbose) fprintf (stderr,_("Loading %s %s image from %s ...\n"), make, model, ifname); if (shot_select >= is_raw) fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"), ifname, shot_select); fseeko (ifp, data_offset, SEEK_SET); if (raw_image && read_from_stdin) fread (raw_image, 2, raw_height*raw_width, stdin); else (*load_raw)(); if (document_mode == 3) { top_margin = left_margin = fuji_width = 0; height = raw_height; width = raw_width; } iheight = (height + shrink) >> shrink; iwidth = (width + shrink) >> shrink; if (raw_image) { image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image); merror (image, "main()"); crop_masked_pixels(); free (raw_image); } if (zero_is_bad) remove_zeroes(); bad_pixels (bpfile); if (dark_frame) subtract (dark_frame); quality = 2 + !fuji_width; if (user_qual >= 0) quality = user_qual; i = cblack[3]; FORC3 if (i > cblack[c]) i = cblack[c]; FORC4 cblack[c] -= i; black += i; i = cblack[6]; FORC (cblack[4] * cblack[5]) if (i > cblack[6+c]) i = cblack[6+c]; FORC (cblack[4] * cblack[5]) cblack[6+c] -= i; black += i; if (user_black >= 0) black = user_black; FORC4 cblack[c] += black; if (user_sat > 0) maximum = user_sat; #ifdef COLORCHECK colorcheck(); #endif if (is_foveon) { if (document_mode || load_raw == &CLASS foveon_dp_load_raw) { for (i=0; i < height*width*4; i++) if ((short) image[0][i] < 0) image[0][i] = 0; } else foveon_interpolate(); } else if (document_mode < 2) scale_colors(); pre_interpolate(); if (filters && !document_mode) { if (quality == 0) lin_interpolate(); else if (quality == 1 || colors > 3) vng_interpolate(); else if (quality == 2 && filters > 1000) ppg_interpolate(); else if (filters == 9) xtrans_interpolate (quality*2-3); else ahd_interpolate(); } if (mix_green) for 
(colors=3, i=0; i < height*width; i++) image[i][1] = (image[i][1] + image[i][3]) >> 1; if (!is_foveon && colors == 3) median_filter(); if (!is_foveon && highlight == 2) blend_highlights(); if (!is_foveon && highlight > 2) recover_highlights(); if (use_fuji_rotate) fuji_rotate(); #ifndef NO_LCMS if (cam_profile) apply_profile (cam_profile, out_profile); #endif convert_to_rgb(); if (use_fuji_rotate) stretch(); thumbnail: if (write_fun == &CLASS jpeg_thumb) write_ext = ".jpg"; else if (output_tiff && write_fun == &CLASS write_ppm_tiff) write_ext = ".tiff"; else write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5; ofname = (char *) malloc (strlen(ifname) + 64); merror (ofname, "main()"); if (write_to_stdout) strcpy (ofname,_("standard output")); else { strcpy (ofname, ifname); if ((cp = strrchr (ofname, '.'))) *cp = 0; if (multi_out) sprintf (ofname+strlen(ofname), "_%0*d", snprintf(0,0,"%d",is_raw-1), shot_select); if (thumbnail_only) strcat (ofname, ".thumb"); strcat (ofname, write_ext); ofp = fopen (ofname, "wb"); if (!ofp) { status = 1; perror (ofname); goto cleanup; } } if (verbose) fprintf (stderr,_("Writing data to %s ...\n"), ofname); (*write_fun)(); fclose(ifp); if (ofp != stdout) fclose(ofp); cleanup: if (meta_data) free (meta_data); if (ofname) free (ofname); if (oprof) free (oprof); if (image) free (image); if (multi_out) { if (++shot_select < is_raw) arg--; else shot_select = 0; } } return status; } #endif
GB_unaryop__identity_int16_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int16_uint16 // op(A') function: GB_tran__identity_int16_uint16 // C type: int16_t // A type: uint16_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int16_uint16 ( int16_t *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; 
p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
createlut.c
// this uses the coefficient cube optimiser from the paper: // // Wenzel Jakob and Johannes Hanika. A low-dimensional function space for // efficient spectral upsampling. Computer Graphics Forum (Proceedings of // Eurographics), 38(2), March 2019. // run like // make && ./createlut 512 lut.pfm XYZ && eu lut.pfm -w 1400 -h 1400 // creates spectra.lut (c0*1e5 y l s)/(x y) and abney.lut (x y)/(s l) #include <math.h> #include <string.h> #include <strings.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "details/lu.h" #include "details/matrices.h" #include "clip.h" #include "inpaint.h" #include "q2t.h" #include "../o-pfm/half.h" #include "../../../core/core.h" int use_bad_cmf = 0; // okay let's also hack the cie functions to our taste (or the gpu approximations we'll do) #define BAD_SAMPLES 30 #define BAD_FINE_SAMPLES 30 #define BAD_LAMBDA_MIN 400.0 #define BAD_LAMBDA_MAX 700.0 /// Discretization of quadrature scheme #define CIE_SAMPLES 95 #define CIE_LAMBDA_MIN 360.0 #define CIE_LAMBDA_MAX 830.0 #define CIE_FINE_SAMPLES ((CIE_SAMPLES - 1) * 3 + 1) #define RGB2SPEC_EPSILON 1e-4 #define MOM_EPS 1e-3 #include "details/cie1931.h" /// Precomputed tables for fast spectral -> RGB conversion double lambda_tbl[CIE_FINE_SAMPLES], rgb_tbl[3][CIE_FINE_SAMPLES], rgb_to_xyz[3][3], xyz_to_rgb[3][3], xyz_whitepoint[3]; /// Currently supported gamuts typedef enum Gamut { SRGB, ProPhotoRGB, ACES2065_1, ACES_AP1, REC2020, ERGB, XYZ, } Gamut; double sigmoid(double x) { return 0.5 * x / sqrt(1.0 + x * x) + 0.5; } double sqrd(double x) { return x * x; } void cvt_c0yl_c012(const double *c0yl, double *coeffs) { coeffs[0] = c0yl[0]; coeffs[1] = c0yl[2] * -2.0 * c0yl[0]; coeffs[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2]; } void cvt_c012_c0yl(const double *coeffs, double *c0yl) { // account for normalising lambda: double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double A = coeffs[0], B = coeffs[1], C = coeffs[2]; double A2 = (double)(A*(sqrd(c1))); 
double B2 = (double)(B*c1 - 2.0*A*c0*(sqrd(c1))); double C2 = (double)(C - B*c0*c1 + A*(sqrd(c0*c1))); if(fabs(A2) < 1e-12) { c0yl[0] = c0yl[1] = c0yl[2] = 0.0; return; } // convert to c0 y dom-lambda: c0yl[0] = A2; // square slope stays c0yl[2] = B2 / (-2.0*A2); // dominant wavelength c0yl[1] = C2 - B2*B2 / (4.0 * A2); // y } void quantise_coeffs(double coeffs[3], float out[3]) { // account for normalising lambda: double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double A = coeffs[0], B = coeffs[1], C = coeffs[2]; const double A2 = (A*(sqrd(c1))); const double B2 = (B*c1 - 2*A*c0*(sqrd(c1))); const double C2 = (C - B*c0*c1 + A*(sqrd(c0*c1))); out[0] = (float)A2; out[1] = (float)B2; out[2] = (float)C2; #if 0 // DEBUG vis if(fabs(A2) < 1e-12) { out[0] = out[1] = out[2] = 0.0; return; } // convert to c0 y dom-lambda: out[0] = A2; // square slope stays out[1] = C2 - B2*B2 / (4.0 * A2); // y out[2] = B2 / (-2.0*A2); // dominant wavelength out[2] = (out[2] - c0)*c1; // normalise to [0,1] range for vis #endif } void init_coeffs(double coeffs[3]) { coeffs[0] = 0.0; coeffs[1] = 1.0; coeffs[2] = 0.0; } void clamp_coeffs(double coeffs[3]) { double max = fmax(fmax(fabs(coeffs[0]), fabs(coeffs[1])), fabs(coeffs[2])); if (max > 1000) { for (int j = 0; j < 3; ++j) coeffs[j] *= 1000 / max; } #if 0 // clamp dom lambda to visible range: // this will cause the fitter to diverge on the ridge. 
double c0yl[3]; c0yl[0] = coeffs[0]; if(fabs(coeffs[0]) < 1e-12) return; c0yl[2] = coeffs[1] / (-2.0*coeffs[0]); c0yl[1] = coeffs[2] - coeffs[1]*coeffs[1] / (4.0 * coeffs[0]); c0yl[2] = CLAMP(c0yl[2], 0.0, 1.0); coeffs[0] = c0yl[0]; coeffs[1] = c0yl[2] * -2.0 * c0yl[0]; coeffs[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2]; #endif } int check_gamut(double rgb[3]) { double xyz[3] = {0.0}; for(int j=0;j<3;j++) for(int i=0;i<3;i++) xyz[i] += rgb_to_xyz[i][j] * rgb[j]; double x = xyz[0] / (xyz[0] + xyz[1] + xyz[2]); double y = xyz[1] / (xyz[0] + xyz[1] + xyz[2]); return spectrum_outside(x, y); } /** * This function precomputes tables used to convert arbitrary spectra * to RGB (either sRGB or ProPhoto RGB) * * A composite quadrature rule integrates the CIE curves, reflectance, and * illuminant spectrum over each 5nm segment in the 360..830nm range using * Simpson's 3/8 rule (4th-order accurate), which evaluates the integrand at * four positions per segment. While the CIE curves and illuminant spectrum are * linear over the segment, the reflectance could have arbitrary behavior, * hence the extra precations. 
 */
// Fill the global quadrature tables (rgb_tbl, lambda_tbl, xyz_whitepoint)
// and the gamut matrices (xyz_to_rgb, rgb_to_xyz) for the chosen gamut.
// With use_bad_cmf set, a plain midpoint rule replaces Simpson's 3/8
// (matching the abridged cmf used on the GPU).
void init_tables(Gamut gamut) {
    memset(rgb_tbl, 0, sizeof(rgb_tbl));
    memset(xyz_whitepoint, 0, sizeof(xyz_whitepoint));

    // each gamut pairs its reference illuminant with its matrix pair
    const double *illuminant = 0;

    switch (gamut) {
        case SRGB:
            illuminant = cie_d65;
            memcpy(xyz_to_rgb, xyz_to_srgb, sizeof(double) * 9);
            memcpy(rgb_to_xyz, srgb_to_xyz, sizeof(double) * 9);
            break;

        case ERGB:
            illuminant = cie_e;
            memcpy(xyz_to_rgb, xyz_to_ergb, sizeof(double) * 9);
            memcpy(rgb_to_xyz, ergb_to_xyz, sizeof(double) * 9);
            break;

        case XYZ:
            illuminant = cie_e;
            memcpy(xyz_to_rgb, xyz_to_xyz, sizeof(double) * 9);
            memcpy(rgb_to_xyz, xyz_to_xyz, sizeof(double) * 9);
            break;

        case ProPhotoRGB:
            illuminant = cie_d50;
            memcpy(xyz_to_rgb, xyz_to_prophoto_rgb, sizeof(double) * 9);
            memcpy(rgb_to_xyz, prophoto_rgb_to_xyz, sizeof(double) * 9);
            break;

        case ACES2065_1:
            illuminant = cie_d60;
            memcpy(xyz_to_rgb, xyz_to_aces2065_1, sizeof(double) * 9);
            memcpy(rgb_to_xyz, aces2065_1_to_xyz, sizeof(double) * 9);
            break;

        case ACES_AP1:
            illuminant = cie_d60;
            memcpy(xyz_to_rgb, xyz_to_aces_ap1, sizeof(double) * 9);
            memcpy(rgb_to_xyz, aces_ap1_to_xyz, sizeof(double) * 9);
            break;

        case REC2020:
            illuminant = cie_d65;
            memcpy(xyz_to_rgb, xyz_to_rec2020, sizeof(double) * 9);
            memcpy(rgb_to_xyz, rec2020_to_xyz, sizeof(double) * 9);
            break;
    }

    for (int i = 0; i < CIE_FINE_SAMPLES; ++i) {
        double h, lambda, weight;
        if(!use_bad_cmf)
        { // Simpson's 3/8 composite rule: endpoint weight 1, interior
          // weights alternate 3,3,2 (times 3/8 h)
            h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (CIE_FINE_SAMPLES - 1.0);
            lambda = CIE_LAMBDA_MIN + i * h;

            weight = 3.0 / 8.0 * h;
            if (i == 0 || i == CIE_FINE_SAMPLES - 1)
                ;
            else if ((i - 1) % 3 == 2)
                weight *= 2.f;
            else
                weight *= 3.f;
        }
        else
        { // midpoint rule over the same range
            h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
            lambda = CIE_LAMBDA_MIN + (i+0.5) * h;
            weight = h;
        }
        double xyz[3] = {
            cie_interp(cie_x, lambda),
            cie_interp(cie_y, lambda),
            cie_interp(cie_z, lambda) };
        const double I = cie_interp(illuminant, lambda);

#if 0 // output table for shader code
        double out[3] = {0.0};
        for (int k = 0; k < 3; ++k)
            for (int j = 0; j < 3; ++j)
                out[k] += xyz_to_rgb[k][j] * xyz[j];
        fprintf(stderr, "vec3(%g, %g, %g), // %g nm\n", out[0], out[1], out[2], lambda);
#endif
        lambda_tbl[i] = lambda;
        // bake cmf * illuminant * quadrature weight into the rgb table so
        // eval_residual() is a plain dot product
        for (int k = 0; k < 3; ++k)
            for (int j = 0; j < 3; ++j)
                rgb_tbl[k][i] += xyz_to_rgb[k][j] * xyz[j] * I * weight;

        for (int k = 0; k < 3; ++k)
            xyz_whitepoint[k] += xyz[k] * I * weight;
    }
}

// residual = rgb - integral of sigmoid(quadratic(lambda)) against rgb_tbl.
// lambda is normalised to [0,1]; quantise_coeffs()/cvt_c012_c0yl() undo
// this normalisation when writing out.
void eval_residual(const double *coeff, const double *rgb, double *residual)
{
    double out[3] = { 0.0, 0.0, 0.0 };

    for (int i = 0; i < CIE_FINE_SAMPLES; ++i) {
        // the optimiser doesn't like nanometers.
        // we'll do the normalised lambda thing and later convert when we write out.
        double lambda;
        if(use_bad_cmf) lambda = (i+.5)/(double)CIE_FINE_SAMPLES;
        else lambda = i/(double)CIE_FINE_SAMPLES;
        double cf[3] = {coeff[0], coeff[1], coeff[2]};

        /* Polynomial (Horner; inner i deliberately shadows the sample index) */
        double x = 0.0;
        for (int i = 0; i < 3; ++i)
            x = x * lambda + cf[i];

        /* Sigmoid */
        double s = sigmoid(x);

        /* Integrate against precomputed curves */
        for (int j = 0; j < 3; ++j)
            out[j] += rgb_tbl[j][i] * s;
    }
    memcpy(residual, rgb, sizeof(double) * 3);

    for (int j = 0; j < 3; ++j)
        residual[j] -= out[j];
}

// 3x3 Jacobian of the residual by central finite differences
// (step RGB2SPEC_EPSILON per coefficient).
void eval_jacobian(const double *coeffs, const double *rgb, double **jac)
{
    double r0[3], r1[3], tmp[3];

    for (int i = 0; i < 3; ++i) {
        memcpy(tmp, coeffs, sizeof(double) * 3);
        tmp[i] -= RGB2SPEC_EPSILON;
        eval_residual(tmp, rgb, r0);

        memcpy(tmp, coeffs, sizeof(double) * 3);
        tmp[i] += RGB2SPEC_EPSILON;
        eval_residual(tmp, rgb, r1);

        // x == x fails only for NaN: catch divergence early
        for(int j=0;j<3;j++) assert(r1[j] == r1[j]);
        for(int j=0;j<3;j++) assert(r0[j] == r0[j]);

        for (int j = 0; j < 3; ++j)
            jac[j][i] = (r1[j] - r0[j]) * 1.0 / (2 * RGB2SPEC_EPSILON);
    }
}

// Gauss-Newton fit of the sigmoid coefficients to the target rgb.
// Returns the final residual norm, or 666.0 if the LU decomposition of
// the Jacobian failed (singular system).
double gauss_newton(const double rgb[3], double coeffs[3])
{
    int it = 40;//15;
    double r = 0;
    for (int i = 0; i < it; ++i) {
        double J0[3], J1[3], J2[3], *J[3] = { J0, J1, J2 };

        double residual[3];

        clamp_coeffs(coeffs);
        eval_residual(coeffs, rgb, residual);
        eval_jacobian(coeffs, rgb, J);

        int P[4]; // pivot indices for LUP (n+1 entries)
        int rv = LUPDecompose(J, 3, 1e-15, P);
        if (rv != 1) {
            fprintf(stdout, "RGB %g %g %g -> %g %g %g\n", rgb[0], rgb[1], rgb[2], coeffs[0], coeffs[1], coeffs[2]);
            fprintf(stdout, "J0 %g %g %g\n", J0[0], J0[1], J0[2]);
            fprintf(stdout, "J1 %g %g %g\n", J1[0], J1[1], J1[2]);
            fprintf(stdout, "J2 %g %g %g\n", J2[0], J2[1], J2[2]);
            return 666.0;
        }

        double x[3];
        LUPSolve(J, P, residual, 3, x);

        r = 0.0;
        for (int j = 0; j < 3; ++j) {
            coeffs[j] -= x[j];
            r += residual[j] * residual[j];
        }
        if (r < 1e-6)
            break;
    }
    return sqrt(r);
}

// map a gamut name (case-insensitive) to the enum; unknown names fall
// back to SRGB.
static Gamut parse_gamut(const char *str)
{
    if(!strcasecmp(str, "sRGB"))
        return SRGB;
    if(!strcasecmp(str, "eRGB"))
        return ERGB;
    if(!strcasecmp(str, "XYZ"))
        return XYZ;
    if(!strcasecmp(str, "ProPhotoRGB"))
        return ProPhotoRGB;
    if(!strcasecmp(str, "ACES2065_1"))
        return ACES2065_1;
    if(!strcasecmp(str, "ACES_AP1"))
        return ACES_AP1;
    if(!strcasecmp(str, "REC2020"))
        return REC2020;
    return SRGB;
}

// createlut <resolution> <output> [<gamut>] [-b]
// fits sigmoid coefficients over the chromaticity triangle, bins the
// results into a (dominant wavelength, saturation) map, and writes
// abney.lut (or spectra.lut when built with -DMKSPECTRA) plus a debug pfm.
int main(int argc, char **argv)
{
    if (argc < 3) {
        printf("syntax: createlut <resolution> <output> [<gamut>] [-b]\n"
               "where <gamut> is one of sRGB,eRGB,XYZ,ProPhotoRGB,ACES2065_1,ACES_AP1,REC2020\n");
        exit(-1);
    }
    for(int k=1;k<argc;k++) if(!strcmp(argv[k], "-b")) use_bad_cmf = 1;
    Gamut gamut = XYZ;
    if(argc > 3) gamut = parse_gamut(argv[3]);
    init_tables(gamut);

    const int res = atoi(argv[1]); // resolution of 2d lut

    // on-disk lut header; same layout is used by macadam.lut, abney.lut
    // and spectra.lut
    typedef struct header_t
    {
        uint32_t magic;
        uint16_t version;
        uint8_t  channels;
        uint8_t  datatype;
        uint32_t wd;
        uint32_t ht;
    }
    header_t;

    // read max macadam brightness lut (half floats, 1 channel, version 2)
    int max_w, max_h;
    float *max_b = 0;
    {
        FILE *f = fopen("macadam.lut", "rb");
        header_t header;
        if(!f) goto mac_error;
        if(fread(&header, sizeof(header_t), 1, f) != 1) goto mac_error;
        max_w = header.wd;
        max_h = header.ht;
        if(header.channels != 1) goto mac_error;
        if(header.version != 2) goto mac_error;
        max_b = calloc(sizeof(float), max_w*max_h);
        uint16_t *half = calloc(sizeof(float), max_w*max_h);
        // NOTE(review): fread return value is unchecked here; a short read
        // would leave trailing zeros in the table.
        fread(half, header.wd*header.ht, sizeof(uint16_t), f);
        fclose(f);
        for(int k=0;k<header.wd*header.ht;k++)
            max_b[k] = half_to_float(half[k]);
        free(half);
        if(0)
        {
mac_error:
            if(f) fclose(f);
            fprintf(stderr, "could not read macadam.lut!\n");
            exit(2);
        }
    }

    printf("optimising ");

    int lsres = res/4; // coarser (lambda, saturation) binning buffer
    float *lsbuf = calloc(sizeof(float), 5*lsres*lsres);

    size_t bufsize = 5*res*res;
    float *out = calloc(sizeof(float), bufsize);

#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic) shared(stdout,out,max_b,max_w,max_h)
#endif
    for (int j = 0; j < res; ++j)
    { // y == saturation dimension
        printf(".");
        fflush(stdout);
        for (int i = 0; i < res; ++i)
        { // x == hue dimension
            double x = (i) / (double)res;
            double y = (j) / (double)res;
            quad2tri(&x, &y); // map unit square sample to chromaticity triangle
            double rgb[3];
            double coeffs[3];
            init_coeffs(coeffs);
            // normalise to max(rgb)=1
            rgb[0] = x;
            rgb[1] = y;
            rgb[2] = 1.0-x-y;
            if(check_gamut(rgb)) continue;

            // scale brightness to half the macadam limit at this chromaticity
            int ii = (int)fmin(max_w - 1, fmax(0, x * max_w + 0.5));
            int jj = (int)fmin(max_h - 1, fmax(0, y * max_h + 0.5));
            double m = fmax(0.001, 0.5*max_b[ii + max_w * jj]);

            double rgbm[3] = {rgb[0] * m, rgb[1] * m, rgb[2] * m};
            double resid = gauss_newton(rgbm, coeffs);

            double c0yl[3];
            cvt_c012_c0yl(coeffs, c0yl);

            (void)resid;
            int idx = j*res + i;
            out[5*idx + 0] = coeffs[0];
            out[5*idx + 1] = coeffs[1];
            out[5*idx + 2] = coeffs[2];

            float xy[2] = {x, y}, white[2] = {1.0f/3.0f, 1.0f/3.0f}; // illum E //{.3127266, .32902313}; // D65
            float sat = spectrum_saturation(xy, white);
            // bin into lambda/saturation buffer
            float satc = lsres * sat;
            // normalise to extended range:
            float norm = (c0yl[2] - 400.0)/(700.0-400.0);
            // float lamc = 1.0/(1.0+exp(-2.0*(2.0*norm-1.0))) * lsres / 2; // center deriv=1
            // float fx = norm*norm*norm+norm;
            float fx = norm-0.5;
            // fx = fx*fx*fx+fx; // worse
            float lamc = (0.5 + 0.5 * fx / sqrt(fx*fx+0.25)) * lsres / 2;
            int lami = fmaxf(0, fminf(lsres/2-1, lamc));
            int sati = satc;
            // sign of c0 selects the lower/upper half of the lambda axis
            if(c0yl[0] > 0) lami += lsres/2;
            lami = fmaxf(0, fminf(lsres-1, lami));
            sati = fmaxf(0, fminf(lsres-1, sati));
            // keep the sample closest to the bin center
            float olamc = lsbuf[5*(lami*lsres + sati)+3];
            float osatc = lsbuf[5*(lami*lsres + sati)+4];
            float odist =
                (olamc - lami - 0.5f)*(olamc - lami - 0.5f)+
                (osatc - sati - 0.5f)*(osatc - sati - 0.5f);
            float dist =
                ( lamc - lami - 0.5f)*( lamc - lami - 0.5f)+
                ( satc - sati - 0.5f)*( satc - sati - 0.5f);
            if(dist < odist)
            {
                lsbuf[5*(lami*lsres + sati)+0] = x;
                lsbuf[5*(lami*lsres + sati)+1] = y;
                lsbuf[5*(lami*lsres + sati)+2] = 1.0-x-y;
                lsbuf[5*(lami*lsres + sati)+3] = lamc;
                lsbuf[5*(lami*lsres + sati)+4] = satc;
            }
            out[5*idx + 3] = (lami+0.5f) / (float)lsres;
            out[5*idx + 4] = (sati+0.5f) / (float)lsres;
        }
    }
#ifndef MKSPECTRA // don't write spectra.lut
    { // scope write abney map on (lambda, saturation)
        buf_t inpaint_buf = {
            .dat = lsbuf,
            .wd  = lsres,
            .ht  = lsres,
            .cpp = 5,
        };
        inpaint(&inpaint_buf); // fill bins the scatter pass left empty
        // determine gamut boundaries for rec709 and rec2020:
        // walk each row and find first time it goes outside.
        // record this in special 1d tables
        float *bound_rec709  = calloc(sizeof(float), lsres);
        float *bound_rec2020 = calloc(sizeof(float), lsres);
        for(int j=0;j<lsres;j++)
        {
            int active = 3; // bit 0: rec709 still inside, bit 1: rec2020
            for(int i=0;i<lsres;i++)
            {
                int idx = j*lsres + i;
                double xyz[] = {lsbuf[5*idx], lsbuf[5*idx+1], 1.0-lsbuf[5*idx]-lsbuf[5*idx+1]};
                double rec709 [3] = {0.0};
                double rec2020[3] = {0.0};
                for (int k = 0; k < 3; ++k)
                    for (int l = 0; l < 3; ++l)
                        rec709[k] += xyz_to_srgb[k][l] * xyz[l];
                for (int k = 0; k < 3; ++k)
                    for (int l = 0; l < 3; ++l)
                        rec2020[k] += xyz_to_rec2020[k][l] * xyz[l];
                if((active & 1) && (rec709 [0] < 0 || rec709 [1] < 0 || rec709 [2] < 0))
                {
                    bound_rec709[j] = (i-.5f)/(float)lsres;
                    active &= ~1;
                }
                if((active & 2) && (rec2020[0] < 0 || rec2020[1] < 0 || rec2020[2] < 0))
                {
                    bound_rec2020[j] = (i-.5f)/(float)lsres;
                    active &= ~2;
                }
                if(!active) break;
            }
        }
        // write 2 channel half lut: lsres+1 columns, last column holds the
        // per-row gamut boundaries
        uint32_t size = 2*sizeof(uint16_t)*lsres*(lsres+1);
        uint16_t *b16 = malloc(size);
        // also write pfm for debugging purposes
        FILE *pfm = fopen(argv[2], "wb");
        if(pfm) fprintf(pfm, "PF\n%d %d\n-1.0\n", lsres+1, lsres);
        for(int j=0;j<lsres;j++)
        {
            for(int i=0;i<lsres;i++)
            {
                int ki = j*lsres + i, ko = j*(lsres+1) + i;
                b16[2*ko+0] = float_to_half(lsbuf[5*ki+0]);
                b16[2*ko+1] = float_to_half(lsbuf[5*ki+1]);
                float q[] = {lsbuf[5*ki], lsbuf[5*ki+1], 1.0f-lsbuf[5*ki]-lsbuf[5*ki+1]};
                if(pfm) fwrite(q, sizeof(float), 3, pfm);
            }
            b16[2*(j*(lsres+1)+lsres)+0] = float_to_half(bound_rec709 [j]);
            b16[2*(j*(lsres+1)+lsres)+1] = float_to_half(bound_rec2020[j]);
            float q[] = {bound_rec709[j], bound_rec2020[j], 0.0f};
            if(pfm) fwrite(q, sizeof(float), 3, pfm);
        }
        header_t head = (header_t) {
            .magic    = 1234,
            .version  = 2,
            .channels = 2,
            .datatype = 0,
            .wd       = lsres+1,
            .ht       = lsres,
        };
        FILE *f = fopen("abney.lut", "wb");
        if(f)
        {
            fwrite(&head, sizeof(head), 1, f);
            fwrite(b16, size, 1, f);
            fclose(f);
        }
        free(b16);
        free(bound_rec709);
        free(bound_rec2020);
        if(pfm) fclose(pfm);
    }
#endif
#ifdef MKSPECTRA // write four channel lut only for abridged cmf
    { // write spectra map: (x,y) |--> sigmoid coeffs + saturation
        header_t head = (header_t) {
            .magic    = 1234,
            .version  = 2,
            .channels = 4,
            .datatype = 1, // 32-bit float
            .wd       = res,
            .ht       = res,
        };
        FILE *pfm = fopen(argv[2], "wb"); // also write pfm for debugging purposes
        if(pfm) fprintf(pfm, "PF\n%d %d\n-1.0\n", res, res);
        FILE *f = fopen("spectra.lut", "wb");
        if(f) fwrite(&head, sizeof(head), 1, f);
        for(int k=0;k<res*res;k++)
        {
            double coeffs[3] = {out[5*k+0], out[5*k+1], out[5*k+2]};
            float q[] = {0, 0, 0, out[5*k+4]};
            // c0yl works in half, but doesn't interpolate upon lookup :(
            quantise_coeffs(coeffs, q);
            if(f)   fwrite(q, sizeof(float), 4, f);
            if(pfm) fwrite(q, sizeof(float), 3, pfm);
        }
        if(f)   fclose(f);
        if(pfm) fclose(pfm);
    }
#endif
    free(out);
    printf("\n");
}
o3logon_fmt_plug.c
/* * This software was written by JimF jfoug AT cox dot net * in 2016. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2016 JimF * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_o3logon; #elif FMT_REGISTERS_H john_register_one(&fmt_o3logon); #else #include <string.h> #include <openssl/des.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "sha.h" #include "unicode.h" #include "base64_convert.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 // tuned on core i7 //#define OMP_SCALE 8192 // tuned on K8-Dual HT #endif #endif #include "memdbg.h" #define FORMAT_LABEL "o3logon" #define FORMAT_NAME "Oracle O3LOGON protocol" #define FORMAT_TAG "$o3logon$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA1 DES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define MAX_USERNAME_LEN 30 #define SALT_SIZE (sizeof(ora9_salt)) #define SALT_ALIGN (sizeof(unsigned int)) #define CIPHERTEXT_LENGTH 16 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+32+1+80) //#define DEBUG_ORACLE // // The keys are $ora9i$ user $ auth_sess_key $ auth_pass_key These can be found in sniffed network traffic. 
// self-test vectors: hash string ("$o3logon$user$sesskey$encpass") + plaintext
static struct fmt_tests tests[] = {
	{"$o3logon$PASSWORD9$8CF28B36E4F3D2095729CF59510003BF$3078D7DE44385654CC952A9C56E2659B", "password9"},
	{"$o3logon$scott$819D062FE5D93F79FF19BDAFE2F9872A$C6D1ED7E6F4D3A6D94F1E49460122D39A3832CC792AD7137", "scottscottscott1"},
	{"$o3logon$SCOTT$8E9E3E07864D99BB602C443F45E4AFC1$3591851B327BB85A114BD73D51B80AF58E942002B9612F82", "scottscottscott1234"},
	{"$o3logon$scott$4488AFD7905E9966912CA680A3C0A23E$628FBAC5CF0E5548743E16123BF027B9314D7EE8B4E30DB213F683F8D7E786EA", "scottscottscott12345"},
	{NULL}
};

// per-hash salt: username (UTF16-BE) plus the sniffed session key and
// encrypted password from the O3LOGON exchange
typedef struct ora9_salt_t {
	int userlen, auth_pass_len;      // byte lengths of user[] and auth_pass[]
	UTF16 user[MAX_USERNAME_LEN+1];  // uppercased username, UTF16-BE
	unsigned char auth_sesskey[16];  // AUTH_SESSKEY from the wire
	unsigned char auth_pass[40];     // AUTH_PASSWORD ciphertext
} ora9_salt;

static ora9_salt *cur_salt;
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];   // per-candidate uppercased UTF16-BE key
static char (*plain_key)[PLAINTEXT_LENGTH + 1];  // per-candidate original plaintext
static int *cur_key_len;                         // byte length of cur_key[i]
static int *cracked, any_cracked;
static DES_key_schedule desschedule1;	// key 0x0123456789abcdef

// one-time setup: fixed DES schedule and per-candidate buffers (scaled
// by OpenMP thread count).
static void init(struct fmt_main *self)
{
	DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1);
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	cur_key  = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key));
	plain_key  = mem_calloc(self->params.max_keys_per_crypt, sizeof(*plain_key));
	cur_key_len  = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key_len));
	cracked  = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked));
}

// release the buffers allocated in init()
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(cur_key_len);
	MEM_FREE(plain_key);
	MEM_FREE(cur_key);
}

// syntactic validation of one hash line: tag, username length/encoding,
// 32 hex chars of session key, and a 16-aligned hex encrypted password.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *cp;
	char tmp[32*5+1];
	UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2];
	int len, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	if (!cp)
		return 0;
	// make sure username fits in MAX_USERNAME_LEN UTF16
	if (cp-ciphertext > sizeof(tmp)-1)
		return 0;
	memcpy(tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp));
	if (len < 0 || (len == 0 && cp-ciphertext)) {
		static int error_shown = 0;
#ifdef HAVE_FUZZ
		if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
			return 0;
#endif
		if (!error_shown)
			fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
		error_shown = 1;
		return 0;
	}
	if (len > MAX_USERNAME_LEN)
		return 0;
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	// session key field: exactly 32 hex digits
	if (!cp || cp-ciphertext != 32 || hexlenu(ciphertext, 0) != 32)
		return 0;
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	len = strlen(ciphertext);
	// final field: non-empty hex, multiple of 16 chars (8-byte DES blocks)
	if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra)
		return 0;
	return 1;
}

// canonicalise a hash line: uppercase everything after the tag so
// equivalent hashes compare equal (FMT_SPLIT_UNIFIES_CASE).
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[MAX_HASH_LEN*5+1];

	strnzcpy(out, ciphertext, MAX_HASH_LEN+1);
	enc_strupper(&out[FORMAT_TAG_LEN]);
	return out;
}

// install the salt produced by get_salt()
static void set_salt(void *salt) {
	cur_salt = (ora9_salt *)salt;
}

// store one candidate: keep the raw plaintext for cmp, and an
// uppercased UTF16-BE copy for the Oracle DES hash.
static void oracle_set_key(char *key, int index) {
	UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
	UTF16 *c;
	int key_length;

	strcpy(plain_key[index], key);
	// Can't use enc_to_utf16_be() because we need to do utf16_uc later
	key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));

	if (key_length < 0)
		key_length = strlen16(cur_key_mixedcase);

	// We convert and uppercase in one shot
	key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length);
	// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
	// and we can not 'fix' our password.  We simply have to 'not' properly decrypt this one, but protect ourselves.
	if (key_length < 0)
		key_length *= -1;
	cur_key_len[index] = key_length * sizeof(UTF16);
	// Now byte-swap to UTF16-BE
	c = cur_key[index];
	while((*c = *c << 8 | *c >> 8))
		c++;
#ifdef DEBUG_ORACLE
	dump_stuff_msg("cur_key    ", (unsigned char*)cur_key[index], cur_key_len[index]);
#endif
}

// return the candidate exactly as set_key received it
static char *get_key(int index) {
	return plain_key[index];
}

// derive a 40-byte key buffer from input + fixed entropy via two chained
// SHA1 passes (O3LOGON key derivation; only the first 24 bytes are used
// as the 3DES key).
static int ORACLE_TNS_Create_Key_SHA1 (unsigned char *input, int input_len, const unsigned char *Entropy, int EntropyLen, int desired_keylen, unsigned char *out_key)
{
	SHA_CTX ctx;

	SHA1_Init (&ctx);
	SHA1_Update (&ctx, input, input_len);
	SHA1_Update (&ctx, Entropy, EntropyLen);
	SHA1_Final (out_key, &ctx);

	SHA1_Init (&ctx);
	SHA1_Update (&ctx, input, input_len);
	SHA1_Update (&ctx, "\x2", 1);
	SHA1_Update (&ctx, &out_key[1], 19);
	SHA1_Update (&ctx, Entropy, EntropyLen);
	SHA1_Final (out_key+20, &ctx);

	return 0;
}

// 3DES-CBC decrypt with the protocol's fixed IV
static int ORACLE_TNS_Decrypt_3DES_CBC (unsigned char* input, int input_len, const unsigned char key[24], unsigned char *decrypted)
{
	DES_key_schedule ks1,ks2,ks3;
	// fixed IV used by the O3LOGON exchange
	unsigned char iv[] = {0x80,0x20,0x40,0x04,0x08,0x02,0x10,0x01};

	DES_set_key((DES_cblock*) &key[0], &ks1);
	DES_set_key((DES_cblock*) &key[8], &ks2);
	DES_set_key((DES_cblock*) &key[16], &ks3);

	DES_ede3_cbc_encrypt(input,decrypted,input_len,&ks1,&ks2,&ks3,(DES_cblock*) iv,DES_DECRYPT);

	return 0;
}

// fixed entropy constants baked into the O3LOGON key derivation
static unsigned char fixed31 [] = {0xA2,0xFB,0xE6,0xAD,0x4C,0x7D,0x1E,0x3D,
	0x6E,0xB0,0xB7,0x6C,0x97,0xEF,0xFF,0x84,
	0x44,0x71,0x02,0x84,0xAC,0xF1,0x3B,0x29,
	0x5C,0x0F,0x0C,0xB1,0x87,0x75,0xEF};
static unsigned char fixed23 [] = {0xF2,0xFF,0x97,0x87,0x15,0x37,0x07,0x76,
	0x07,0x27,0xE2,0x7F,0xA3,0xB1,0xD6,0x73,
	0x3F,0x2F,0xD1,0x52,0xAB,0xAC,0xC0};

// replay the 9i password decryption: derive a 3DES key from the Oracle
// hash, decrypt the session key, derive again, decrypt the password blob,
// then partially de-obfuscate it. Returns the password blob length.
static int ORACLE_TNS_Decrypt_Password_9i (unsigned char OracleHash[8], unsigned char *auth_sesskey, int auto_sesskeylen, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted)
{
	unsigned char triple_des_key[64];
	unsigned char sesskey[16];
	unsigned char obfuscated[256];
	int PassLen = auth_passwordlen;

	ORACLE_TNS_Create_Key_SHA1 (OracleHash, 8, fixed31, sizeof(fixed31), 24, triple_des_key);
	ORACLE_TNS_Decrypt_3DES_CBC (auth_sesskey, 16, triple_des_key, sesskey);
	ORACLE_TNS_Create_Key_SHA1 (sesskey, 16, fixed23, sizeof(fixed23), 24, triple_des_key);
	ORACLE_TNS_Decrypt_3DES_CBC (auth_password, PassLen, triple_des_key, obfuscated);
	//ORACLE_TNS_DeObfuscate (triple_des_key, obfuscated, &PassLen);
	// NOTE(review): only the byte shuffle of the de-obfuscation step is
	// done inline here (last 4 bytes moved to the front); presumably
	// sufficient for the strncmp() check in crypt_all -- see the
	// commented-out full de-obfuscation call above.
	memcpy(decrypted, &obfuscated[PassLen-4], 4);
	memcpy(&decrypted[4], &obfuscated[4], PassLen-4);

	return PassLen;
}

// for each candidate: compute the classic Oracle DES "hash key" over
// user||password (UTF16-BE, uppercased), decrypt the sniffed password
// with it, and compare the result against the candidate plaintext.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int idx = 0;

	if (any_cracked) {
		memset(cracked, 0, sizeof(*cracked) * count);
		any_cracked = 0;
	}
#ifdef DEBUG_ORACLE
	dump_stuff_msg("cur_salt    ", buf, cur_salt->userlen+key_length);
#endif
#ifdef _OPENMP
#pragma omp parallel for
	for (idx = 0; idx < count; idx++)
#endif
	{
		unsigned char buf[256], buf1[256];
		unsigned int l;
		uint32_t iv[2];
		DES_key_schedule desschedule2;

		l = cur_salt->userlen + cur_key_len[idx];
		memcpy(buf, cur_salt->user, cur_salt->userlen);
		memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);
		// Oracle DES hash: CBC-MAC with the fixed key, then again with
		// the first pass's final block as key; iv ends up as the 8-byte hash
		iv[0] = iv[1] = 0;
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);
		DES_set_key((DES_cblock *)iv, &desschedule2);
		iv[0] = iv[1] = 0;
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);

#ifdef DEBUG_ORACLE
		dump_stuff_msg("  iv (the hash key)  ", (unsigned char*)&iv[0], 8);
#endif
		ORACLE_TNS_Decrypt_Password_9i ((unsigned char*)iv, cur_salt->auth_sesskey, 16, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);
		if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))
		{
			cracked[idx] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

// parse one canonical hash line into an ora9_salt (username uppercased
// to UTF16-BE, hex fields decoded to raw bytes). Assumes valid() passed.
static void *get_salt(char *ciphertext)
{
	static ora9_salt salt;
	UTF8 tmp[MAX_USERNAME_LEN*5+1];
	char *cp;

	memset(&salt, 0, sizeof(salt));
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	strncpy((char*)tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext);
	if (salt.userlen < 0)
		salt.userlen = strlen16(salt.user);
	salt.userlen *= 2; // UTF16 code units -> bytes
	base64_convert(cp+1,e_b64_hex,32,salt.auth_sesskey,e_b64_raw,16,0,0);
	cp = strchr(cp+1, '$') + 1;
	salt.auth_pass_len = strlen(cp)/2;
	base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0);
	return &salt;
}

// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
	UTF16 *s = ((UTF16*)salt) + 1;
	unsigned int hash = 5381;

	while (*s)
		hash = ((hash << 5) + hash) ^ *s++;

	return hash & (SALT_HASH_SIZE - 1);
}

// any candidate cracked for the current salt?
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

// was this particular candidate cracked?
static int cmp_one(void *binary, int count)
{
	return cracked[count];
}

// crypt_all already performed the full comparison
static int cmp_exact(char *source, int index)
{
	return 1;
}

// format descriptor wiring the functions above into the JtR core
struct fmt_main fmt_o3logon = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		oracle_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */