source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unop__cosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fp64_fp64)
// op(A') function: GB (_unop_tran__cosh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cosh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = cosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__cosh_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = cosh (Ax [p]) for every entry present in A.  The cast from
    // the A type to the C type is the identity here (both are double), so
    // the operator is applied directly to Ax [p].
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged in the bitmap are computed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = cosh (Ax [p]) ;
            }
        }
    }
    else
    {
        // sparse, hypersparse, or full: every slot in Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = cosh (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__cosh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // used by the transpose template
    const int64_t *restrict A_slice,    // used by the transpose template
    int nworkspaces,
    int nthreads
)
{
    // C = cosh (A'): transpose, typecast, and apply the unary operator.
    // The algorithm lives entirely in the shared template
    // GB_unop_transpose.c; it is specialized for this type/operator pair
    // through the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
SP2.c |
/////////////////////////// 8INF854 - ARCHITECTURES PARRALLELES - DEVOIR #2 ///////////////////////////////////
///////////////////////////// SP1.c - Corentin RAOULT - Adrien Cambillau /////////////////////////////////////
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   /* time(): seed for srand() in remplirTABrand */
////////////////////// déclaration des fonctions /////////////////////////
int digit_to_int(char d);
void remplirTABrand(int* TAB, int n);
void afficherTAB(int* TAB, int n);
int* SP2(int* T, int n);
///////////////////// MAIN ////////////////////////////////////////////////
int main(int argc, char* argv[])
{
    // Fill an array with random values, compute its prefix sums with SP2,
    // print both arrays, and report the elapsed wall-clock time.
    int n = 102400;
    int* T = malloc(n * sizeof(int));
    if (T == NULL)
    {
        // allocation failure: nothing to clean up yet
        fprintf(stderr, "allocation failure\n");
        return EXIT_FAILURE;
    }
    remplirTABrand(T, n);
    afficherTAB(T, n);
    // omp_get_wtime() returns wall-clock time, which is the right metric
    // for a parallel region (per-thread CPU time would over-count).
    double debut = omp_get_wtime();
    int* S = SP2(T, n);
    double fin = omp_get_wtime();
    if (S == NULL)
    {
        fprintf(stderr, "allocation failure\n");
        free(T);
        return EXIT_FAILURE;
    }
    afficherTAB(S, n);
    printf("durée = %lf\n", fin - debut);
    // release both arrays (they were previously leaked)
    free(S);
    free(T);
    return EXIT_SUCCESS;
}
/////////////////// développement des fonctions /////////////////////////////////
void remplirTABrand(int* TAB, int n)
{
    // Fill TAB[0..n-1] with pseudo-random values in [0, 9999].
    // time() requires <time.h> (added to the includes); without the
    // prototype this was an implicit declaration.  Seeding here is fine
    // as long as the function is called once per run.
    srand(time(NULL));
    for (int i = 0; i < n; i++)
        TAB[i] = rand() % 10000;
}
void afficherTAB(int* TAB, int n)
{
    // Print the n entries of TAB on a single line:  TAB : {  [v0]  [v1] ... }
    int i;
    printf("TAB : { ");
    for (i = 0; i < n; i++)
        printf(" [%d] ", TAB[i]);
    printf(" }\n");
}
int* SP2(int* T, int n)
{
    /*
     * Exclusive prefix sum: S[i] = T[0] + ... + T[i-1], with S[0] = 0.
     * Returns a newly allocated array of n ints (caller frees), or NULL
     * on allocation failure.
     *
     * Fixes over the original:
     *  - `somme` was shared by every iteration of the outer parallel loop
     *    (a data race); it is now private to each iteration.
     *  - the nested `parallel for reduction` inside an already-parallel
     *    loop is removed (it raced on `somme` and oversubscribed threads).
     *  - S[0] was never written (the store sat inside the inner loop,
     *    which does not execute for i == 0); it is now always assigned.
     *  - the malloc result is checked.
     */
    int* S = malloc((size_t)n * sizeof(int));
    if (S == NULL && n > 0)
        return NULL;
    int i;
    // Each iteration is independent, so a flat parallel for suffices.
    #pragma omp parallel for
    for (i = 0; i < n; i++) {
        int somme = 0;
        for (int j = 0; j < i; j++)
            somme += T[j];
        S[i] = somme;
    }
    return S;
}
|
8041.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  /* Deterministic initialization: A[i][j] = (i + j) / nj, so values are
     reproducible across runs without any RNG. */
  int i, j;
  for (i = 0; i < ni; i++)
  {
    for (j = 0; j < nj; j++)
    {
      DATA_TYPE num = (DATA_TYPE) (i + j);
      A[i][j] = num / nj;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  /* Dump every element of B to stderr so the compiler cannot
     dead-code-eliminate the kernel; the dump can also be diffed to
     check output correctness. */
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* newline every 20 elements; uses the compile-time NJ on purpose,
         matching the standard PolyBench output layout */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  /* 2-D convolution with a fixed 3x3 stencil: B(i,j) is a weighted sum
     of A's 3x3 neighborhood.  Only interior points (1..NI-2, 1..NJ-2)
     are computed; the one-cell border of B is left unwritten. */
  int i, j;
  #pragma scop
  /* Rows are distributed across target teams (offload device, if one is
     configured); each team then splits its columns across 8 threads.
     i and j are privatized by the respective OpenMP loop constructs. */
  #pragma omp target teams distribute
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    #pragma omp parallel for schedule(dynamic, 8) num_threads(8)
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
        + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
        + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
  #pragma endscop
  // printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
  /* PolyBench driver: allocate, initialize, time the kernel, and print
     the live-out array so the kernel cannot be optimized away. */
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
GB_emult_02.c | //------------------------------------------------------------------------------
// GB_emult_02: C = A.*B where A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C = A.*B where A is sparse/hyper and B is bitmap/full constructs C with
// the same sparsity structure as A. This method can also be called with
// the two input matrices swapped, with flipxy true, to handle the case
// where A is bitmap/full and B is sparse/hyper.
// When no mask is present, or the mask is applied later, this method handles
// the following cases:
// ------------------------------------------
// C = A .* B
// ------------------------------------------
// sparse . sparse bitmap
// sparse . sparse full
// sparse . bitmap sparse
// sparse . full sparse
// If M is sparse/hyper and complemented, it is not passed here:
// ------------------------------------------
// C <!M>= A .* B
// ------------------------------------------
// sparse sparse sparse bitmap (mask later)
// sparse sparse sparse full (mask later)
// sparse sparse bitmap sparse (mask later)
// sparse sparse full sparse (mask later)
// If M is present, it is bitmap/full:
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
// GB_FREE_WORKSPACE: free the O(ntasks) Work arrays and the slicing of A
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;   \
}
// GB_FREE_ALL: free the workspace and the output C (used on error paths)
#define GB_FREE_ALL             \
{                               \
    GB_FREE_WORKSPACE ;         \
    GB_phbix_free (C) ;         \
}
GrB_Info GB_emult_02        // C=A.*B when A is sparse/hyper, B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // optional mask, unused if NULL
    const bool Mask_struct, // if true, use the only structure of M
    const bool Mask_comp,   // if true, use !M
    const GrB_Matrix A,     // input A matrix (sparse/hyper)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    GrB_BinaryOp op,        // op to perform C = op (A,B)
    bool flipxy,            // if true use fmult(y,x) else fmult(x,y)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for emult_02", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_02", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_02", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_02", GB0) ;
    ASSERT_TYPE_OK (ctype, "ctype for emult_02", GB0) ;

    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
    // NOTE(review): this assert re-checks B; presumably it was meant to
    // assert that M (when present) is bitmap/full — confirm against the
    // header comment "If M is present, it is bitmap/full"
    ASSERT (M == NULL || GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;

    // C inherits the sparsity structure of A (sparse or hypersparse)
    int C_sparsity = GB_sparsity (A) ;

    if (M == NULL)
    {
        GBURBLE ("emult_02:(%s=%s.*%s)",
            GB_sparsity_char (C_sparsity),
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }
    else
    {
        GBURBLE ("emult_02:(%s<%s%s%s>=%s.*%s) ",
            GB_sparsity_char (C_sparsity),
            Mask_comp ? "!" : "",
            GB_sparsity_char_matrix (M),
            Mask_struct ? ",struct" : "",
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }

    //--------------------------------------------------------------------------
    // revise the operator to handle flipxy
    //--------------------------------------------------------------------------

    // Replace the ANY operator with SECOND.  ANY and SECOND give the same
    // result if flipxy is false.  However, SECOND is changed to FIRST if
    // flipxy is true.  This ensures that the results do not depend on the
    // sparsity structures of A and B.

    if (op->opcode == GB_ANY_binop_code)
    {
        switch (op->xtype->code)
        {
            case GB_BOOL_code   : op = GrB_SECOND_BOOL   ; break ;
            case GB_INT8_code   : op = GrB_SECOND_INT8   ; break ;
            case GB_INT16_code  : op = GrB_SECOND_INT16  ; break ;
            case GB_INT32_code  : op = GrB_SECOND_INT32  ; break ;
            case GB_INT64_code  : op = GrB_SECOND_INT64  ; break ;
            case GB_UINT8_code  : op = GrB_SECOND_UINT8  ; break ;
            case GB_UINT16_code : op = GrB_SECOND_UINT16 ; break ;
            case GB_UINT32_code : op = GrB_SECOND_UINT32 ; break ;
            case GB_UINT64_code : op = GrB_SECOND_UINT64 ; break ;
            case GB_FP32_code   : op = GrB_SECOND_FP32   ; break ;
            case GB_FP64_code   : op = GrB_SECOND_FP64   ; break ;
            case GB_FC32_code   : op = GxB_SECOND_FC32   ; break ;
            case GB_FC64_code   : op = GxB_SECOND_FC64   ; break ;
            default: ;
        }
    }

    if (flipxy)
    {
        // if the op can be flipped analytically (e.g. SECOND -> FIRST),
        // do so now and clear the flipxy flag
        bool handled ;
        op = GB_flip_op (op, &handled) ;
        if (handled) flipxy = false ;
    }
    ASSERT_BINARYOP_OK (op, "final op for emult_02", GB0) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    // Wfirst/Wlast hold the partial counts for the first/last (possibly
    // shared) vector of each task; Cp_kfirst maps each task to its start
    // inside Cp.  All three live in the single Work allocation.
    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int8_t *restrict Mb = (M == NULL) ? NULL : M->b ;
    const GB_void *restrict Mx = (M == NULL || Mask_struct) ? NULL :
        (const GB_void *) M->x ;
    const size_t msize = (M == NULL) ? 0 : M->type->size ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;
    const int64_t vdim = A->vdim ;
    const int64_t nvec = A->nvec ;
    const int64_t anz = GB_nnz (A) ;

    const int8_t *restrict Bb = B->b ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as A), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, A->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the input matrix A
    //--------------------------------------------------------------------------

    // GB_SLICE_MATRIX defines kfirst_Aslice, klast_Aslice, and
    // pstart_Aslice, used by the counting loops below
    int A_nthreads, A_ntasks ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GB_SLICE_MATRIX (A, 8, chunk) ;

    //--------------------------------------------------------------------------
    // count entries in C
    --------------------------------------------------------------------------

    C->nvec_nonempty = A->nvec_nonempty ;
    C->nvec = nvec ;

    // when B is full and there is no mask, C has exactly the pattern of A,
    // so the counting phase can be skipped entirely
    const bool C_has_pattern_of_A = !B_is_bitmap && (M == NULL) ;

    if (!C_has_pattern_of_A)
    {

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
        if (Work == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Wfirst    = Work ;
        Wlast     = Work + A_ntasks ;
        Cp_kfirst = Work + A_ntasks * 2 ;

        //----------------------------------------------------------------------
        // count entries in C
        //----------------------------------------------------------------------

        // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

        if (M == NULL)
        {

            //------------------------------------------------------------------
            // Method2(a): C = A.*B where A is sparse/hyper and B is bitmap
            //------------------------------------------------------------------

            ASSERT (B_is_bitmap) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        // an entry of A survives iff B's bitmap is set there
                        cjnz += Bb [pB_start + Ai [pA]] ;
                    }
                    // the first and last vectors may be shared with other
                    // tasks, so their counts go to Wfirst/Wlast and are
                    // merged later; interior vectors go straight into Cp
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method2(c): C<#M> = A.*B; M, B bitmap/full, A is sparse/hyper
            //------------------------------------------------------------------

            ASSERT (M != NULL) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // mij = M(i,j), with Mask_comp applied via XOR
                        bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
                        mij = mij ^ Mask_comp ;
                        cjnz += (mij && GBB (Bb, pB)) ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
        //----------------------------------------------------------------------

        // merge the per-task Wfirst/Wlast counts into Cp, then turn Cp into
        // the usual cumulative-sum vector-pointer array
        GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;
        GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
            Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;
    }

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    --------------------------------------------------------------------------

    int64_t cnz = (C_has_pattern_of_A) ? anz : Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead of memcpy

    if (GB_IS_HYPERSPARSE (A))
    {
        // copy A->h into C->h
        GB_memcpy (C->h, Ah, nvec * sizeof (int64_t), A_nthreads) ;
    }

    if (C_has_pattern_of_A)
    {
        // Method2(b): B is full and no mask present, so the pattern of C is
        // the same as the pattern of A
        GB_memcpy (Cp, Ap, (nvec+1) * sizeof (int64_t), A_nthreads) ;
        GB_memcpy (C->i, Ai, cnz * sizeof (int64_t), A_nthreads) ;
    }

    C->jumbled = A->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    // if flipxy was true on input and the op is positional, FIRST, SECOND, or
    // PAIR, the op has already been flipped, so these tests do not have to
    // consider that case.

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2
    bool done = false ;

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        // flipxy is ignored since the operator is not applied
        #define GB_ISO_EMULT
        #include "GB_emult_02_template.c"
        done = true ;

    }
    else
    {

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_02(mult,xname) GB (_AemultB_02_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_02(mult,xname) (C,                        \
                    M, Mask_struct, Mask_comp, A, B, flipxy,                \
                    Cp_kfirst, A_ek_slicing, A_ntasks, A_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
            {
                // the factory expands to a typed worker for each built-in op
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif
    }

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    // fallback for user-defined ops, positional ops, or when the factory
    // declined (GrB_NO_VALUE)
    if (!done)
    {
        GB_BURBLE_MATRIX (C, "(generic emult_02: %s) ", op->name) ;
        int ewise_method = flipxy ? GB_EMULT_METHOD3 : GB_EMULT_METHOD2 ;
        GB_ewise_generic (C, op, NULL, 0, 0,
            NULL, NULL, NULL, C_sparsity, ewise_method, Cp_kfirst,
            NULL, 0, 0, A_ek_slicing, A_ntasks, A_nthreads, NULL, 0, 0,
            M, Mask_struct, Mask_comp, A, B, Context) ;
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_02", GB0) ;
    return (GrB_SUCCESS) ;
}
|
SerializerIO_WaveletCompression_MPI_Simple.h | /*
* SerializerIO_WaveletCompression_MPI_Simple.h
* CubismZ
*
* Copyright 2018 ETH Zurich. All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#ifndef SERIALIZERIO_WAVELETCOMPRESSION_MPI_SIMPLE_H_
#define SERIALIZERIO_WAVELETCOMPRESSION_MPI_SIMPLE_H_
#endif
#include <typeinfo>
#include <sstream>
#include <numeric>
#ifdef _OPENMP
#include <omp.h>
#else
static int omp_get_max_threads(void) { return 1;}
static int omp_get_thread_num(void) { return 0; }
#endif
#include <Timer.h>
#include <map>
using namespace std;
#include "WaveletSerializationTypes.h"
#include "FullWaveletTransform.h"
#include "WaveletCompressor.h"
#include "CompressionEncoders.h"
//#define _WRITE_AT_ALL_ 1 // peh:
#if defined(_USE_ZEROBITS_)
void float_zero_bits(unsigned int *ul, int n)
{
	// Zero out the n least-significant bits of the 32-bit pattern *ul
	// (a raw float bit pattern).  n is expected to be one of
	// 0, 4, 8, 12, 16; any other value leaves the pattern untouched,
	// exactly as the original if/else chain did.
	unsigned int bits = *ul;
	switch (n)
	{
		case 4:  bits &= 0xfffffff0; break;
		case 8:  bits &= 0xffffff00; break;
		case 12: bits &= 0xfffff000; break;
		case 16: bits &= 0xffff0000; break;
		default: break;  // n == 0 (or unexpected): keep all bits
	}
	*ul = bits;
}
#endif
template<typename GridType, typename IterativeStreamer>
class SerializerIO_WaveletCompression_MPI_SimpleBlocking
{
protected:
enum
{
NCHANNELS = IterativeStreamer::channels,
NPTS = _BLOCKSIZE_ * _BLOCKSIZE_ * _BLOCKSIZE_
};
enum //some considerations about the per-thread working set
{
DESIREDMEM = (4 * 1024) * 1024,
ENTRYSIZE = sizeof(WaveletCompressor) + sizeof(int),
ENTRIES_CANDIDATE = DESIREDMEM / ENTRYSIZE,
ENTRIES = ENTRIES_CANDIDATE ? ENTRIES_CANDIDATE : 1,
BUFFERSIZE = ENTRIES * ENTRYSIZE,
ALERT = (ENTRIES - 1) * ENTRYSIZE
};
struct CompressionBuffer
{
BlockMetadata hotblocks[ENTRIES];
unsigned char compressedbuffer[2*BUFFERSIZE];
};
struct TimingInfo { float total, fwt, encoding; };
string binaryocean_title, binarylut_title, header;
vector< BlockMetadata > myblockindices; //tells in which compressed chunk is any block, nblocks
HeaderLUT lutheader;
vector< size_t > lut_compression; //tells the number of compressed chunk, and where do they start, nchunks + 2
vector< unsigned char > allmydata; //buffer with the compressed data
size_t written_bytes, pending_writes, completed_writes;
Real threshold;
bool halffloat, verbosity;
int wtype_read, wtype_write; // peh
vector< float > workload_total, workload_fwt, workload_encode; //per-thread cpu time for imbalance insight for fwt and encoding
vector<CompressionBuffer> workbuffer; //per-thread compression buffer
	// Losslessly compress the staged payload in-place with zlib, append the
	// result to the shared output buffer (allmydata), and publish per-block
	// metadata.  Called concurrently by many OpenMP threads.  On return the
	// staging buffer is empty (bufsize = 0, nblocks = 0).  Returns the
	// elapsed time in seconds.
	float _encode_and_flush(unsigned char inputbuffer[], long& bufsize, const long maxsize, BlockMetadata metablocks[], int& nblocks)
	{
		//0. setup
		//1. compress the data with zlib, obtain zptr, zbytes
		//2. obtain an offset from allmydata -> dstoffset
		//3. obtain a new entry in lut_compression -> idcompression
		//4. copy the [zptr,zptr+zbytes] into in allmydata, starting from dstoffset
		//5. for all blocks, set the myblockindices[blockids[i]] to a valid state
		//6. set nblocks to zero

		Timer timer; timer.start();

		const unsigned char * const zptr = inputbuffer;
		size_t zbytes = bufsize;
		// sentinels; both are assigned inside the critical section below
		// (note: -1 wraps to SIZE_MAX for the unsigned dstoffset)
		size_t dstoffset = -1;
		int idcompression = -1;

		//1.
		/* ZLIB/LZ4	(LOSSLESS COMPRESSION) */
		{
			/* TODO: replace the following code with: zbytes = zcompress(inputbuffer, bufsize, maxsize); */
			z_stream myzstream = {0};
			deflateInit(&myzstream, Z_DEFAULT_COMPRESSION);	// Z_BEST_COMPRESSION
			//deflateInit(&myzstream, Z_BEST_COMPRESSION);
			unsigned mah = maxsize;
			int err = deflate_inplace(&myzstream, inputbuffer, bufsize, &mah);
			assert(err == Z_OK);
			zbytes = myzstream.total_out;
			// fixed memory leak, Fabian (08 Oct 2014)
			err = deflateEnd(&myzstream);
			assert(err == Z_OK);
		}

		//2-3. reserve a region of allmydata and a LUT slot under the lock
		#pragma omp critical
		{
			dstoffset = written_bytes;
			written_bytes += zbytes;

			//exception: we have to resize allmydata
			if (written_bytes > allmydata.size())
			{
				//yield-based wait until writes complete
				// NOTE(review): completed_writes is updated with an atomic
				// below but read here without one — presumably benign on the
				// target platforms, but worth confirming
				while (pending_writes != completed_writes) sched_yield();

				//safely resize
				printf("resizing allmydata to %ld bytes\n", written_bytes);
				allmydata.resize(written_bytes);
			}

			idcompression = lut_compression.size();
			lut_compression.push_back(dstoffset);

			++pending_writes;
		}

		//4. copy outside the lock: the reserved region belongs to this thread
		assert(allmydata.size() >= written_bytes);
		memcpy(&allmydata.front() + dstoffset, zptr, zbytes);

		#pragma omp atomic
		++completed_writes;

		//5. point each block's index entry at this compressed chunk
		for(int i = 0; i < nblocks; ++i)
		{
			const int entry = metablocks[i].idcompression;
			assert(entry >= 0 && entry < myblockindices.size());

			myblockindices[entry] = metablocks[i];
			myblockindices[entry].idcompression = idcompression;
			myblockindices[entry].subid = i;
		}

		//6. mark the staging buffer as drained for the caller
		bufsize = 0;
		nblocks = 0;

		return timer.stop();
	}
	// Wavelet-transform (or otherwise encode, depending on compile-time
	// flags) one scalar channel of every block in vInfo, staging results in
	// per-thread buffers that are flushed to the shared output through
	// _encode_and_flush whenever they fill up.
	template<int channel>
	void _compress(const vector<BlockInfo>& vInfo, const int NBLOCKS, IterativeStreamer streamer)
	{
		// NOTE(review): compress_threads is computed but never used below
		int compress_threads = omp_get_max_threads();
		if (compress_threads < 1) compress_threads = 1;

		#pragma omp parallel
		{
			const int tid = omp_get_thread_num();
			CompressionBuffer & mybuf = workbuffer[tid];

			// bytes and block count currently staged in this thread's buffer
			long mybytes = 0; int myhotblocks = 0;
			float tfwt = 0, tencode = 0;
			Timer timer;
			timer.start();

			#pragma omp for
			for(int i = 0; i < NBLOCKS; ++i)
			{
				Timer tw; tw.start();

				//wavelet compression
				{
					FluidBlock& b = *(FluidBlock*)vInfo[i].ptrBlock;
					WaveletCompressor compressor;

					// gather the block's channel into a contiguous staging buffer
					Real * const mysoabuffer = &compressor.uncompressed_data()[0][0][0];
					if(streamer.name() == "StreamerGridPointIterative")
					{
						for(int iz=0; iz<FluidBlock::sizeZ; iz++)
							for(int iy=0; iy<FluidBlock::sizeY; iy++)
								for(int ix=0; ix<FluidBlock::sizeX; ix++)
									mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)] = streamer.template operate<channel>(b(ix, iy, iz));
					}
					else
					{
						IterativeStreamer mystreamer(b);
						for(int iz=0; iz<FluidBlock::sizeZ; iz++)
							for(int iy=0; iy<FluidBlock::sizeY; iy++)
								for(int ix=0; ix<FluidBlock::sizeX; ix++)
									mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)] = mystreamer.operate(ix, iy, iz);
					}

					//wavelet digestion
					// Each branch below writes [nbytes][payload] into the
					// staging buffer; which one compiles is chosen by the
					// _USE_* build flag.
					#if defined(_USE_WAVZ_)
					const int nbytes = (int)compressor.compress(this->threshold, this->halffloat, this->wtype_write);
					memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
					mybytes += sizeof(nbytes);
					memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
					#elif defined(_USE_FPZIP_)
					const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
					int nbytes;
					int is_float = (sizeof(Real)==4)? 1 : 0;
					int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
					int fpzip_prec = (int)this->threshold;
					fpz_compress3D((void *)mysoabuffer, inbytes, layout, (void *) compressor.compressed_data(), (unsigned int *)&nbytes, is_float, fpzip_prec);
					memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
					mybytes += sizeof(nbytes);
					memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
					#elif defined(_USE_ZFP_)
					const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
					int nbytes;
					double zfp_acc = this->threshold;
					int is_float = (sizeof(Real)==4)? 1 : 0;
					int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
					size_t nbytes_zfp;
					int status = zfp_compress_buffer(mysoabuffer, layout[0], layout[1], layout[2], zfp_acc, is_float, (unsigned char *)compressor.compressed_data(), &nbytes_zfp);
					nbytes = nbytes_zfp;
					#if VERBOSE
					printf("zfp_compress status = %d, from %d to %d bytes = %d\n", status, inbytes, nbytes);
					#endif
					memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
					mybytes += sizeof(nbytes);
					memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
					#elif defined(_USE_SZ_)
					const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
					int nbytes;
					int is_float = (sizeof(Real)==4)? 1 : 0;
					double sz_abs_acc = 0.0;
					double sz_rel_acc = 0.0;
					double sz_pwr_acc = 0.0;
					int sz_pwr_type = SZ_PWR_MAX_TYPE;
					if(getenv("SZ_ABS_ACC")) sz_abs_acc = atof(getenv("SZ_ABS_ACC"));
					// NOTE(review): the env-var value is immediately
					// overwritten by the threshold on the next line — confirm
					sz_abs_acc = (double) this->threshold;
					int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
					size_t *bytes_sz = (size_t *)malloc(sizeof(size_t));
					unsigned char *compressed_sz = SZ_compress_args(is_float? SZ_FLOAT:SZ_DOUBLE, (unsigned char *)mysoabuffer, bytes_sz, ABS, sz_abs_acc, sz_rel_acc, sz_pwr_acc, sz_pwr_type, 0, 0, layout[2], layout[1], layout[0]);
					nbytes = *bytes_sz;
					memcpy(compressor.compressed_data(), compressed_sz, nbytes);
					free(bytes_sz);
					free(compressed_sz);
					#if VERBOSE
					printf("SZ_compress_args: from %d to %d bytes\n", inbytes, nbytes);
					#endif
					memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
					mybytes += sizeof(nbytes);
					memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
					#else /* NO COMPRESSION */
					const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
					int nbytes = inbytes;
					memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
					mybytes += sizeof(nbytes);
					#if defined(_USE_ZEROBITS_)
					// set some bits to zero
					for(int iz=0; iz<FluidBlock::sizeZ; iz++)
						for(int iy=0; iy<FluidBlock::sizeY; iy++)
							for(int ix=0; ix<FluidBlock::sizeX; ix++)
								float_zero_bits((unsigned int *)&mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)], _ZEROBITS_);
					#endif
					memcpy(mybuf.compressedbuffer + mybytes, mysoabuffer, sizeof(unsigned char) * nbytes);
					#endif
					mybytes += nbytes;
				}
				tfwt += tw.stop();

				//building the meta data
				{
					BlockMetadata curr = { i, myhotblocks, vInfo[i].index[0], vInfo[i].index[1], vInfo[i].index[2]};
					mybuf.hotblocks[myhotblocks] = curr;
					myhotblocks++;
				}

				// flush when the staging buffer is nearly full
				if (mybytes >= ALERT || myhotblocks >= ENTRIES)
					tencode = _encode_and_flush(mybuf.compressedbuffer, mybytes, (long)BUFFERSIZE, mybuf.hotblocks, myhotblocks);
			}

			// final flush of a partially filled buffer
			if (mybytes > 0)
				tencode = _encode_and_flush(mybuf.compressedbuffer, mybytes, (long)BUFFERSIZE, mybuf.hotblocks, myhotblocks);

			// NOTE(review): tencode is overwritten on each flush (not
			// accumulated), so only the last flush's time is recorded —
			// confirm this is intended for the imbalance statistics
			workload_total[tid] = timer.stop();
			workload_fwt[tid] = tfwt;
			workload_encode[tid] = tencode;
		}
	}
virtual void _to_file(const MPI_Comm mycomm, const string fileName)
{
int mygid;
int nranks;
MPI_Comm_rank(mycomm, &mygid);
MPI_Comm_size(mycomm, &nranks);
MPI_Info myfileinfo;
MPI_Info_create(&myfileinfo);
string key("access_style");
string val("write_once");
MPI_Info_set(myfileinfo, (char*)key.c_str(), (char*)val.c_str());
MPI_File myfile;
#if defined(_WRITE_AT_ALL_)
MPI_File_open(mycomm, (char*)fileName.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, myfileinfo, &myfile);
#else
MPI_File_open(MPI_COMM_SELF, (char*)fileName.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, myfileinfo, &myfile);
#endif
MPI_Info_free(&myfileinfo);
size_t current_displacement = 0;
//write the mini-header
{
size_t blank_address = -1;
const int miniheader_bytes = sizeof(blank_address) + binaryocean_title.size();
current_displacement += miniheader_bytes;
}
//write the buffer - alias the binary ocean
{
// apparently this suck:
// myfile.Seek_shared(current_displacement, MPI_SEEK_SET);
// myfile.Write_ordered(&allmydata.front(), written_bytes, MPI_CHAR);
// current_displacement = myfile.Get_position_shared();
// so here we do it manually. so nice!
size_t myfileoffset = 0;
MPI_Exscan(&written_bytes, &myfileoffset, 1, MPI_UINT64_T, MPI_SUM, mycomm);
if (mygid == 0)
myfileoffset = 0;
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + myfileoffset, &allmydata.front(), written_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + myfileoffset, &allmydata.front(), written_bytes, MPI_CHAR, &status);
#endif
//here we update current_displacement by broadcasting the total written bytes from rankid = nranks -1
size_t total_written_bytes = myfileoffset + written_bytes;
MPI_Bcast(&total_written_bytes, 1, MPI_UINT64_T, nranks - 1, mycomm);
current_displacement += total_written_bytes;
}
//go back at the blank address and fill it with the displacement
if (mygid == 0)
{
MPI_Status status;
MPI_File_write(myfile, ¤t_displacement, sizeof(current_displacement), MPI_CHAR, &status);
MPI_File_write(myfile, (void*)binaryocean_title.c_str(), binaryocean_title.size(), MPI_CHAR, &status);
}
//write the header
{
const size_t header_bytes = header.size();
MPI_Status status;
if (mygid == 0)
MPI_File_write_at(myfile, current_displacement, (void*)header.c_str(), header_bytes, MPI_CHAR, &status);
current_displacement += header_bytes;
}
//write block metadata
{
const int metadata_bytes = myblockindices.size() * sizeof(BlockMetadata);
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + mygid * metadata_bytes, &myblockindices.front(), metadata_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + mygid * metadata_bytes, &myblockindices.front(), metadata_bytes, MPI_CHAR, &status);
#endif
current_displacement += metadata_bytes * nranks;
}
//write the lut title
{
const int title_bytes = binarylut_title.size();
MPI_Status status;
if (mygid == 0)
MPI_File_write_at(myfile, current_displacement, (void*)binarylut_title.c_str(), title_bytes, MPI_CHAR, &status);
current_displacement += title_bytes;
}
//write the local buffer entries
{
assert(lut_compression.size() == 0);
const int lutheader_bytes = sizeof(lutheader);
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + mygid * lutheader_bytes, &lutheader, lutheader_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + mygid * lutheader_bytes, &lutheader, lutheader_bytes, MPI_CHAR, &status);
#endif
}
MPI_File_close(&myfile); //bon voila tu vois ou quoi
}
// Reduce per-thread timings across all ranks and, on the root, print the
// global min / average / max plus the relative imbalance of this workload.
// Returns the (root-side) global average; on non-root ranks the value is the
// locally reduced contribution, exactly as in the original implementation.
float _profile_report(const char * const workload_name, vector<float>& workload, const MPI_Comm mycomm, bool isroot)
{
    // local statistics over this rank's per-thread samples
    float local_min = *std::min_element(workload.begin(), workload.end());
    float local_max = *std::max_element(workload.begin(), workload.end());
    float local_sum = std::accumulate(workload.begin(), workload.end(), 0.f);

    // fold into global min/sum/max on rank 0 (in place on the root)
    MPI_Reduce(isroot ? MPI_IN_PLACE : &local_min, &local_min, 1, MPI_FLOAT, MPI_MIN, 0, mycomm);
    MPI_Reduce(isroot ? MPI_IN_PLACE : &local_sum, &local_sum, 1, MPI_FLOAT, MPI_SUM, 0, mycomm);
    MPI_Reduce(isroot ? MPI_IN_PLACE : &local_max, &local_max, 1, MPI_FLOAT, MPI_MAX, 0, mycomm);

    int comm_size;
    MPI_Comm_size(mycomm, &comm_size);

    // turn the global sum into an average over all (rank, thread) samples
    local_sum /= comm_size * workload.size();

    if (isroot)
        printf("TLP %-10s min:%.3e s avg:%.3e s max:%.3e s imb:%.0f%%\n",
               workload_name, local_min, local_sum, local_max, (local_max - local_min) / local_sum * 100);

    return local_sum;
}
// Top-level write path for one scalar channel: builds the ASCII header,
// compresses every resident block into `allmydata`, appends the compression
// LUT, writes the archive via _to_file(), and optionally prints a report.
template<int channel>
void _write(GridType & inputGrid, string fileName, IterativeStreamer streamer)
{
    const vector<BlockInfo> infos = inputGrid.getBlocksInfo();
    const int NBLOCKS = infos.size();

    //prepare the headers
    {
        this->binaryocean_title = "\n==============START-BINARY-OCEAN==============\n";
        this->binarylut_title = "\n==============START-BINARY-LUT==============\n";

        {
            // global vs. per-rank (resident) block counts, and physical extents
            const int xtotalbpd = inputGrid.getBlocksPerDimension(0);
            const int ytotalbpd = inputGrid.getBlocksPerDimension(1);
            const int ztotalbpd = inputGrid.getBlocksPerDimension(2);
            const int xbpd = inputGrid.getResidentBlocksPerDimension(0);
            const int ybpd = inputGrid.getResidentBlocksPerDimension(1);
            const int zbpd = inputGrid.getResidentBlocksPerDimension(2);
            const double xExtent = inputGrid.getH()*xtotalbpd*_BLOCKSIZE_;
            const double yExtent = inputGrid.getH()*ytotalbpd*_BLOCKSIZE_;
            const double zExtent = inputGrid.getH()*ztotalbpd*_BLOCKSIZE_;

            std::stringstream ss;
            ss << "\n==============START-ASCI-HEADER==============\n";
            {
                // runtime endianness probe: first byte of int(1) is 1 on little-endian
                int one = 1;
                bool isone = *(char *)(&one);
                ss << "Endianess: " << (isone ? "little" : "big") << "\n";
            }
            ss << "sizeofReal: " << sizeof(Real) << "\n";
            ss << "sizeofsize_t: " << sizeof(size_t) << "\n";
            ss << "sizeofBlockMetadata: " << sizeof(BlockMetadata) << "\n";
            ss << "sizeofHeaderLUT: " << sizeof(HeaderLUT) << "\n";
            ss << "sizeofCompressedBlock: " << sizeof(CompressedBlock) << "\n";
            ss << "Blocksize: " << _BLOCKSIZE_ << "\n";
            ss << "Blocks: " << xtotalbpd << " x " << ytotalbpd << " x " << ztotalbpd << "\n";
            ss << "Extent: " << xExtent << " " << yExtent << " " << zExtent << "\n";
            ss << "SubdomainBlocks: " << xbpd << " x " << ybpd << " x " << zbpd << "\n";
            ss << "HalfFloat: " << (this->halffloat ? "yes" : "no") << "\n";
            // record which transform/compressor produced the payload
#if defined(_USE_WAVZ_)
            ss << "Wavelets: " << WaveletsOnInterval::ChosenWavelets_GetName(this->wtype_write) << "\n";
#elif defined(_USE_FPZIP_)
            ss << "Wavelets: " << "fpzip" << "\n";
#elif defined(_USE_ZFP_)
            ss << "Wavelets: " << "zfp" << "\n";
#elif defined(_USE_SZ_)
            ss << "Wavelets: " << "sz" << "\n";
#else
            ss << "Wavelets: " << "none" << "\n";
#endif
            ss << "WaveletThreshold: " << threshold << "\n";
#if defined(_USE_ZLIB_)
            ss << "Encoder: " << "zlib" << "\n";
#elif defined(_USE_LZ4_)
            ss << "Encoder: " << "lz4" << "\n";
#else
            ss << "Encoder: " << "none" << "\n";
#endif
            ss << "==============START-BINARY-METABLOCKS==============\n";

            this->header = ss.str();
        }
    }

    //compress my data, prepare for serialization
    {
        double t0 = MPI_Wtime();

        written_bytes = pending_writes = completed_writes = 0;

        if (allmydata.size() == 0)
        {
            //const size_t speculated_compression_rate = 10;
            // worst-case sizing: uncompressed payload plus 4 MiB of slack
            allmydata.resize(NBLOCKS * sizeof(Real) * NPTS + 4*1024*1024);
        }

        myblockindices.clear();
        myblockindices.resize(NBLOCKS);

        lut_compression.clear();

        _compress<channel>(infos, infos.size(), streamer);

        //manipulate the file data (allmydata, lut_compression, myblockindices)
        //so that they are file-friendly
        {
            // append the chunk LUT right after the payload and summarize it
            // in `lutheader` so the reader can locate both
            const int nchunks = lut_compression.size();
            const size_t extrabytes = lut_compression.size() * sizeof(size_t);
            const char * const lut_ptr = (char *)&lut_compression.front();

            allmydata.insert(allmydata.begin() + written_bytes, lut_ptr, lut_ptr + extrabytes);
            lut_compression.clear();

            HeaderLUT newvalue = { written_bytes + extrabytes, nchunks };
            lutheader = newvalue;

            written_bytes += extrabytes;
        }

        double t1 = MPI_Wtime();
        printf("SerializerIO_WaveletCompression_MPI_Simple.h: compress+serialization %f seconds\n", t1-t0);
    }

    double io_t0 = MPI_Wtime();
    ///
    const MPI_Comm mycomm = inputGrid.getCartComm();

    int mygid;
    int comm_size;
    MPI_Comm_rank(mycomm, &mygid);
    MPI_Comm_size(mycomm, &comm_size);

    //write into the file (CUBISMZ_NOIO lets benchmarks skip the I/O phase)
    Timer timer; timer.start();
    if (getenv("CUBISMZ_NOIO") == NULL)
        _to_file(mycomm, fileName);
    vector<float> workload_file(1, timer.stop());
    ///
    double io_t1 = MPI_Wtime();
#if VERBOSE
    printf("SerializerIO_WaveletCompression_MPI_Simple.h: _to_file %f seconds\n", io_t1-io_t0);
#endif

    //just a report now
    if (verbosity)
    {
        size_t aggregate_written_bytes = -1;
        MPI_Reduce(&written_bytes, &aggregate_written_bytes, 1, MPI_UINT64_T, MPI_SUM, 0, mycomm);

        const bool isroot = mygid == 0;
        if (mygid == 0)
            printf("Channel %d: %.2f kB, wavelet-threshold: %.1e, compr. rate: %.2f\n",
                   channel, aggregate_written_bytes/1024.,
                   threshold, NPTS * sizeof(Real) * NBLOCKS * comm_size / (float) aggregate_written_bytes);
#if VERBOSE
        const float tavgcompr = _profile_report("Compr", workload_total, mycomm, isroot);
        const float tavgfwt =_profile_report("FWT+decim", workload_fwt, mycomm, isroot);
        const float tavgenc =_profile_report("Encoding", workload_encode, mycomm, isroot);
        const float tavgio =_profile_report("FileIO", workload_file, mycomm, isroot);
        const float toverall = tavgio + tavgcompr;

        if (isroot)
        {
            printf("Time distribution: %5s:%.0f%% %5s:%.0f%% %5s:%.0f%% %5s:%.0f%%\n",
                   "FWT ", tavgfwt / toverall * 100,
                   "ENC ", tavgenc / toverall * 100,
                   "IO ", tavgio / toverall * 100,
                   "Other", (tavgcompr - tavgfwt - tavgenc)/ toverall * 100);
        }
#endif
    }
}
// Read an archive back: parse the ASCII header, load all BlockMetadata,
// rebuild the global chunk LUT, then demonstrate random access by
// decompressing one block.
// NOTE(review): fread/fscanf/fgets return values are unchecked throughout;
// parsing integrity relies entirely on asserts — verify before production use.
void _read(string path)
{
    //THE FIRST PART IS SEQUENTIAL
    //THE SECOND ONE IS RANDOM ACCESS

    size_t global_header_displacement = -1;
    int NBLOCKS = -1;
    int totalbpd[3] = {-1, -1, -1};
    int bpd[3] = { -1, -1, -1};
    string binaryocean_title = "\n==============START-BINARY-OCEAN==============\n";
    const int miniheader_bytes = sizeof(size_t) + binaryocean_title.size();

    vector<BlockMetadata> metablocks;

    //random access data structures: meta2subchunk, lutchunks;
    map<int, map<int, map<int, CompressedBlock > > > meta2subchunk;
    vector<size_t> lutchunks;

    {
        FILE * file = fopen(path.c_str(), "rb");
        assert(file);

        //reading the header and mini header
        {
            // mini-header slot holds the byte offset of the ASCII header
            size_t header_displacement = -1;
            fread(&header_displacement, sizeof(size_t), 1, file);

            fseek(file, header_displacement, SEEK_SET);
            global_header_displacement = header_displacement;

            char buf[1024];
            fgets(buf, sizeof(buf), file);
            fgets(buf, sizeof(buf), file);
            assert(string("==============START-ASCI-HEADER==============\n") == string(buf));

            fscanf(file, "Endianess: %s\n", buf);
            {
                // compare the file's endianness tag against this machine
                int one = 1;
                bool isone = *(char *)(&one);

                if (isone)
                    assert(string(buf) == "little");
                else
                    assert(string(buf) == "big");
            }

            int sizeofreal = -1;
            fscanf(file, "sizeofReal: %d\n", &sizeofreal);
            assert(sizeof(Real) == sizeofreal);

            // NOTE(review): the writer emits sizeofsize_t / sizeofBlockMetadata /
            // sizeofHeaderLUT / sizeofCompressedBlock / Extent / WaveletThreshold
            // lines that this reader never consumes; fscanf stops at the first
            // literal mismatch, which would leave the fields below unparsed.
            // This reader appears to target an older header layout — TODO confirm.
            int bsize = -1;
            fscanf(file, "Blocksize: %d\n", &bsize);
            assert(bsize == _BLOCKSIZE_);
            fscanf(file, "Blocks: %d x %d x %d\n", totalbpd, totalbpd + 1, totalbpd + 2);
            fscanf(file, "SubdomainBlocks: %d x %d x %d\n", bpd, bpd + 1, bpd + 2);

            fscanf(file, "HalfFloat: %s\n", buf);
            this->halffloat = (string(buf) == "yes");

            fscanf(file, "Wavelets: %s\n", buf);
            assert(buf == string(WaveletsOnInterval::ChosenWavelets_GetName(this->wtype_read)));

            fscanf(file, "Encoder: %s\n", buf);
#if defined(_USE_ZLIB_)
            assert(buf == string("zlib"));
#else /* _USE_LZ4_ */
            assert(buf == string("lz4"));
#endif

            fgets(buf, sizeof(buf), file);
            assert(string("==============START-BINARY-METABLOCKS==============\n") == string(buf));

            NBLOCKS = totalbpd[0] * totalbpd[1] * totalbpd[2];
        }

        //reading the binary lut
        {
            // one fixed-size BlockMetadata record per global block
            metablocks.resize(NBLOCKS);

            for(int i = 0; i < NBLOCKS; ++i)
            {
                BlockMetadata entry;
                fread(&entry, sizeof(entry), 1, file);
                assert(entry.idcompression >= 0 && entry.idcompression < bpd[0] * bpd[1] * bpd[2]);
                metablocks[i] = entry;
            }
        }

        //reading the compression lut
        {
            char buf[1024];
            fgetc(file); //skip the carriage return
            fgets(buf, sizeof(buf), file);
            assert(string("==============START-BINARY-LUT==============\n") == string(buf));

            //bool done = false;

            size_t base = miniheader_bytes;

            const int BPS = bpd[0] * bpd[1] * bpd[2];
            assert(NBLOCKS % BPS == 0);
            const int SUBDOMAINS = NBLOCKS / BPS;

            vector<HeaderLUT> headerluts(SUBDOMAINS); //oh mamma mia
            fread(&headerluts.front(), sizeof(HeaderLUT), SUBDOMAINS, file);

            // stitch each subdomain's local chunk offsets into one global LUT
            for(int s = 0, currblock = 0; s < SUBDOMAINS; ++s)
            {
                const int nglobalchunks = lutchunks.size();

                assert(!feof(file));

                const int nchunks = headerluts[s].nchunks;
                const size_t myamount = headerluts[s].aggregate_bytes;
                // the local LUT sits at the tail of the subdomain's payload
                const size_t lutstart = base + myamount - sizeof(size_t) * nchunks;

                //read the lut
                fseek(file, lutstart, SEEK_SET);
                vector<size_t> mylut(nchunks);
                fread(&mylut.front(), sizeof(size_t), nchunks, file);

                // sanity: offsets are in-range and strictly increasing
                for(int i=0; i< nchunks; ++i)
                    assert(mylut[i] < myamount);

                for(int i=1; i< nchunks; ++i)
                    assert(mylut[i-1] < mylut[i]);

                //compute the global positioning of the compressed chunks within the file
                for(int i = 0; i < mylut.size(); ++i)
                {
                    assert(mylut[i] < myamount);
                    mylut[i] += base;
                }

                assert(myamount > 0);
                base += myamount;
                assert(base <= global_header_displacement);

                //compute the base for this blocks
                for(int i = 0; i < BPS; ++i, ++currblock)
                    metablocks[currblock].idcompression += nglobalchunks;

                lutchunks.insert(lutchunks.end(), mylut.begin(), mylut.end());
            }

            assert(base == global_header_displacement);
            lutchunks.push_back(base); // sentinel: end offset of the last chunk

            {
                // drain any trailing bytes; nothing should remain before EOF
                int c = fgetc(file);

                do
                {
                    //printf("shouldnt be here! 0x%x\n", c);
                    c = fgetc(file);
                }
                while (! feof(file) );
            }
        }

        fclose(file);
    }

    // index every block by (iz, iy, ix) with its chunk's file span
    for(int i = 0; i < NBLOCKS ; ++i)
    {
        BlockMetadata entry = metablocks[i];

        assert(entry.idcompression >= 0);
        assert(entry.idcompression < lutchunks.size()-1);

        size_t start_address = lutchunks[entry.idcompression];
        size_t end_address = lutchunks[entry.idcompression + 1];

        assert(start_address < end_address);
        assert(end_address <= global_header_displacement);
        assert( start_address < global_header_displacement );

        CompressedBlock compressedblock = { start_address, end_address - start_address, entry.subid };

        meta2subchunk[entry.iz][entry.iy][entry.ix] = compressedblock;
    }

    //at this point we can access every block
    {
        //lets try with block 7, 3, 5 (demo: actual indices hard-coded below)
        FILE * f = fopen(path.c_str(), "rb");
        assert(f);

        const int ix = 0;
        const int iy = 2;
        const int iz = 0;

        assert(ix >= 0 && ix < totalbpd[0]);
        assert(iy >= 0 && iy < totalbpd[1]);
        assert(iz >= 0 && iz < totalbpd[2]);

        assert(meta2subchunk.find(iz) != meta2subchunk.end());
        assert(meta2subchunk[iz].find(iy) != meta2subchunk[iz].end());
        assert(meta2subchunk[iz][iy].find(ix) != meta2subchunk[iz][iy].end());

        CompressedBlock compressedchunk = meta2subchunk[iz][iy][ix];

        size_t start = compressedchunk.start;
        assert(start >= miniheader_bytes);
        assert(start < global_header_displacement);
        assert(start + compressedchunk.extent < global_header_displacement);

        // read the chunk, decode it, then walk the sub-blocks to `subid`
        vector<unsigned char> compressedbuf(compressedchunk.extent);
        fseek(f, compressedchunk.start, SEEK_SET);
        fread(&compressedbuf.front(), compressedchunk.extent, 1, f);
        assert(!feof(f));

        vector<unsigned char> waveletbuf(4 << 20);
        const size_t decompressedbytes = zdecompress(&compressedbuf.front(), compressedbuf.size(), &waveletbuf.front(), waveletbuf.size());

        int readbytes = 0;
        // skip the sub-blocks preceding the one we want (each is [nbytes][payload])
        for(int i = 0; i<compressedchunk.subid; ++i)
        {
            int nbytes = *(int *)&waveletbuf[readbytes];
            readbytes += sizeof(int);
            assert(readbytes <= decompressedbytes);
            //printf("scanning nbytes...%d\n", nbytes);
            readbytes += nbytes;
            assert(readbytes <= decompressedbytes);
        }

        Real MYBLOCK[_BLOCKSIZE_][_BLOCKSIZE_][_BLOCKSIZE_];

        {
            int nbytes = *(int *)&waveletbuf[readbytes];
            readbytes += sizeof(int);
            assert(readbytes <= decompressedbytes);

            WaveletCompressor compressor;
            memcpy(compressor.compressed_data(), &waveletbuf[readbytes], nbytes);
            readbytes += nbytes;

            compressor.decompress(halffloat, nbytes, wtype_read, MYBLOCK);
        }

        // dump the decoded block values for inspection
        for(int iz = 0; iz< _BLOCKSIZE_; ++iz)
            for(int iy = 0; iy< _BLOCKSIZE_; ++iy)
                for(int ix = 0; ix< _BLOCKSIZE_; ++ix)
                    printf("%d %d %d: %e\n", ix, iy, iz, MYBLOCK[iz][iy][ix]);

        fclose(f);
    }
}
public:
// --- configuration setters (call before Write/Read) ---
void set_threshold(const Real threshold) { this->threshold = threshold; } // wavelet truncation threshold
void set_wtype_write(const int wtype) { this->wtype_write = wtype; }      // wavelet type used when writing
void set_wtype_read(const int wtype) { this->wtype_read = wtype; }        // wavelet type expected when reading
void float16() { halffloat = true; }  // store wavelet coefficients in half precision
void verbose() { verbosity = true; }  // enable the summary report after Write()
// Default-construct with zeroed byte/write counters and per-thread
// profiling/work buffers sized to the maximum OpenMP thread count.
SerializerIO_WaveletCompression_MPI_SimpleBlocking():
    written_bytes(0), pending_writes(0),
    threshold(0), halffloat(false), verbosity(false),
    workload_total(omp_get_max_threads()), workload_fwt(omp_get_max_threads()), workload_encode(omp_get_max_threads()),
    workbuffer(omp_get_max_threads())
{
    wtype_write = 1; // default wavelet type for writing (original author note: "peh")
    wtype_read = 1;  // default wavelet type for reading (original author note: "peh")
}
// Write one channel of the grid. Channel 0 uses the bare fileName; higher
// channels get a ".<streamer-name>.ch<channel>" suffix appended.
template< int channel >
void Write(GridType & inputGrid, string fileName, IterativeStreamer streamer = IterativeStreamer())
{
    string suffix;
    //ss << "." << streamer.name() << ".channel" << channel;
    if (channel > 0)
    {
        std::stringstream tag;
        tag << "." << streamer.name() << ".ch" << channel;
        suffix = tag.str();
    }
    _write<channel>(inputGrid, fileName + suffix, streamer);
}
// Read back every channel written by Write(): channel 0 from the bare
// fileName, higher channels from the ".<streamer-name>.ch<channel>" variants.
void Read(string fileName, IterativeStreamer streamer = IterativeStreamer())
{
    for(int channel = 0; channel < NCHANNELS; ++channel)
    {
        string suffix;
        if (channel > 0)
        {
            std::stringstream tag;
            tag << "." << streamer.name() << ".ch" << channel;
            suffix = tag.str();
        }
        _read(fileName + suffix);
    }
}
};
|
GB_binop__isge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_uint8
// A.*B function (eWiseMult): GB_AemultB__isge_uint8
// A*D function (colscale): GB_AxD__isge_uint8
// D*A function (rowscale): GB_DxB__isge_uint8
// C+=B function (dense accum): GB_Cdense_accumB__isge_uint8
// C+=b function (dense accum): GB_Cdense_accumb__isge_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_uint8
// C=scalar+B GB_bind1st__isge_uint8
// C=scalar+B' GB_bind1st_tran__isge_uint8
// C=A+scalar GB_bind2nd__isge_uint8
// C=A'+scalar GB_bind2nd_tran__isge_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense eWise C = A+B with the ISGE_UINT8 operator; the loop lives in the
// included template. Auto-generated code: do not edit by hand.
GrB_Info GB_Cdense_ewise3_noaccum__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Accumulates sparse B into dense C via the ISGE_UINT8 operator, using the
// precomputed task slices. Auto-generated code: do not edit by hand.
GrB_Info GB_Cdense_accumB__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulates scalar b into every entry of dense C via the ISGE_UINT8
// operator. Auto-generated code: do not edit by hand.
GrB_Info GB_Cdense_accumb__isge_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: unreachable — the inner block already returned (generator artifact)
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Column-scales A by diagonal D with the ISGE_UINT8 operator; the loop lives
// in the included template. Auto-generated code: do not edit by hand.
GrB_Info GB_AxD__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // direct access to C's numeric values, filled in by the template
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row-scales B by diagonal D with the ISGE_UINT8 operator; the loop lives in
// the included template. Auto-generated code: do not edit by hand.
GrB_Info GB_DxB__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // direct access to C's numeric values, filled in by the template
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Sparse eWiseAdd (union of patterns) with the ISGE_UINT8 operator, driven by
// the precomputed TaskList. Auto-generated code: do not edit by hand.
GrB_Info GB_AaddB__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Sparse eWiseMult (intersection of patterns) with the ISGE_UINT8 operator,
// driven by the precomputed TaskList. Auto-generated code: do not edit by hand.
GrB_Info GB_AemultB__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x >= Bx [p]) for all p, with x bound as the first
// operand. Parallelized statically over the anz entries.
GrB_Info GB_bind1st__isge_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x >= Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] >= y) for all p, with y bound as the second
// operand. Parallelized statically over the anz entries.
GrB_Info GB_bind2nd__isge_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] >= y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}
// Transposes A while applying (x >= aij); the transpose loop lives in the
// included template. Auto-generated code: do not edit by hand.
GrB_Info GB_bind1st_tran__isge_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}
// Transposes A while applying (aij >= y); the transpose loop lives in the
// included template. Auto-generated code: do not edit by hand.
GrB_Info GB_bind2nd_tran__isge_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
sigmoid_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: hhchen@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b))
#define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Reference fp32 sigmoid: y = 1 / (1 + exp(-clamp(x, -30, 30))).
 * The input is clamped to [-30, 30] before exponentiation to keep expf()
 * well-behaved. 4-D tensors are processed per (batch, channel) slice with
 * OpenMP across channels; other ranks fall back to one flat loop.
 * Returns 0 on success. */
int ref_sigmoid_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int dim_num = input_tensor->dim_num;

    if (dim_num == 4)
    {
        int batch = input_tensor->dims[0];
        int channel = input_tensor->dims[1];
        int cstep = input_tensor->dims[2] * input_tensor->dims[3]; /* elems per channel plane */
        int bstep = channel * cstep;                               /* elems per batch */

        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channel; c++)
            {
                float* input_data = (float*)input_tensor->data + n * bstep + c * cstep;
                float* output_data = (float*)output_tensor->data + n * bstep + c * cstep;

                for (int i = 0; i < cstep; i++)
                {
                    /* BUGFIX: the second clamp previously re-read input_data[i],
                     * discarding the MIN result, so values were never clamped at
                     * the upper bound. Clamp the intermediate instead. */
                    output_data[i] = SIGMOID_MIN(input_data[i], 30.0f);
                    output_data[i] = SIGMOID_MAX(output_data[i], -30.0f);
                    output_data[i] = 1.f / (1.f + expf(-output_data[i]));
                }
            }
        }
    }
    else
    {
        uint32_t elem_num = input_tensor->elem_num;
        float* input_data = (float*)input_tensor->data;
        float* output_data = (float*)output_tensor->data;

        /* unsigned index to match elem_num and avoid signed/unsigned mismatch */
        for (uint32_t i = 0; i < elem_num; i++)
        {
            output_data[i] = SIGMOID_MIN(input_data[i], 30.0f);
            output_data[i] = SIGMOID_MAX(output_data[i], -30.0f);
            output_data[i] = 1.f / (1.f + expf(-output_data[i]));
        }
    }

    return 0;
}
/* Reference uint8 sigmoid: dequantize -> clamp to [-30, 30] -> sigmoid ->
 * requantize with saturation to [0, 255].
 * Returns 0 on success, -1 if the scratch buffers cannot be allocated. */
int ref_sigmoid_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dequant */
    uint8_t* input_uint8 = (uint8_t*)input_tensor->data;
    uint8_t* output_uint8 = (uint8_t*)output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    float* input_fp32 = (float*)sys_malloc(input_size * sizeof(float));
    float* output_fp32 = (float*)sys_malloc(output_size * sizeof(float));
    /* BUGFIX: check the scratch allocations instead of dereferencing NULL */
    if (input_fp32 == NULL || output_fp32 == NULL)
    {
        if (input_fp32)
            sys_free(input_fp32);
        if (output_fp32)
            sys_free(output_fp32);
        return -1;
    }

    for (int i = 0; i < input_size; i++)
    {
        input_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
    }

    for (int i = 0; i < input_size; i++)
    {
        /* BUGFIX: the second clamp previously re-read input_fp32[i], discarding
         * the MIN result, so values were never clamped at the upper bound. */
        output_fp32[i] = SIGMOID_MIN(input_fp32[i], 30.0f);
        output_fp32[i] = SIGMOID_MAX(output_fp32[i], -30.0f);
        output_fp32[i] = 1 / (1 + exp(-output_fp32[i]));
    }

    /* quant: round, shift by the zero point, saturate to the uint8 range */
    for (int i = 0; i < output_size; i++)
    {
        int udata = round(output_fp32[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(input_fp32);
    sys_free(output_fp32);

    return 0;
}
/* Per-node initialization hook: the reference sigmoid needs no extra state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node, so no-op. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Propagate the input tensor's shape to the output when dims 1..3 differ.
 * Returns 0 if no reshape is needed, otherwise the set_ir_tensor_shape result. */
static int reshape_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* Only dims 1..3 are compared (dim 0, the batch, is ignored here). */
    int mismatch = 0;
    for (int d = 1; d <= 3; d++)
    {
        if (in->dims[d] != out->dims[d])
        {
            mismatch = 1;
            break;
        }
    }

    if (!mismatch)
        return 0;
    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Pre-run hook: no buffers or tuning needed for the reference kernel. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Dispatch on the input data type to the matching reference sigmoid kernel.
 * Returns the kernel's status, or -1 for unsupported data types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    switch (in->data_type)
    {
        case TENGINE_DT_FP32:
            return ref_sigmoid_fp32(in, out, exec_graph->num_thread);
        case TENGINE_DT_UINT8:
            return ref_sigmoid_uint8(in, out, exec_graph->num_thread);
        default:
            return -1; /* unsupported data type */
    }
}
/* Scheduling priority: reference implementation, lowest "can do" score. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* Operator table wiring the sigmoid reference implementation into the graph executor. */
static struct node_ops sigmoid_node_ops = {.prerun = prerun,
                                           .run = run,
                                           .reshape = reshape_node,
                                           .postrun = NULL,
                                           .init_node = init_node,
                                           .release_node = release_node,
                                           .score = score};
/* Register the reference sigmoid with the built-in operator registry. */
int register_sigmoid_ref_op()
{
    return register_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops);
}
/* Remove the reference sigmoid from the built-in operator registry. */
int unregister_sigmoid_ref_op()
{
    return unregister_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops);
}
|
utils.c |
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
// #include "lapacke.h"
/* Auxiliary routine: printing a matrix */
// void print_matrix( char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda )
// {
// lapack_int i, j;
// printf( "\n %s\n", desc );
// for( i = 0; i < m; i++ ) {
// for( j = 0; j < n; j++ ) printf( " %6.2f", a[i*lda+j] );
// printf( "\n" );
// }
// }
// /* Auxiliary routine: printing a vector of integers */
// void print_int_vector( char* desc, lapack_int n, lapack_int* a )
// {
// lapack_int j;
// printf( "\n %s\n", desc );
// for( j = 0; j < n; j++ ) printf( " %6i", a[j] );
// printf( "\n" );
// }
// void testlapacke()
// {
// // export LD_LIBRARY_PATH=/anaconda3/envs/owcsimpy-dev/lib/:$LD_LIBRARY_PATH
// int N = 5;
// int NRHS = 3;
// int LDA = N;
// int LDB = NRHS;
// lapack_int n = N, nrhs = NRHS, lda = LDA, ldb = LDB, info;
// lapack_int ipiv[5];
// double a[5*5] = {
// 6.80, -6.05, -0.45, 8.32, -9.67,
// -2.11, -3.30, 2.58, 2.71, -5.14,
// 5.66, 5.36, -2.70, 4.35, -7.26,
// 5.97, -4.44, 0.27, -7.17, 6.08,
// 8.23, 1.08, 9.04, 2.14, -6.87
// };
// double b[3*5] = {
// 4.02, -1.56, 9.81,
// 6.19, 4.00, -4.09,
// -8.22, -8.67, -4.57,
// -7.57, 1.75, -8.61,
// -3.03, 2.86, 8.99
// };
// double aNorm;
// double rcond;
// char ONE_NORM = '1';
// lapack_int NROWS = n;
// lapack_int NCOLS = n;
// lapack_int LEADING_DIMENSION_A = n;
// /* Print Entry Matrix */
// print_matrix( "Entry Matrix A", n, n, a, lda );
// /* Print Right Rand Side */
// print_matrix( "Right Rand Side", n, nrhs, b, ldb );
// printf( "\n" );
// /* Executable statements */
// printf( "LAPACKE_dgecon Example Program Results\n" );
// aNorm = LAPACKE_dlange(LAPACK_ROW_MAJOR, ONE_NORM, NROWS, NCOLS, a, LEADING_DIMENSION_A);
// info = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, NROWS, NCOLS, a, LEADING_DIMENSION_A, ipiv);
// info = LAPACKE_dgecon(LAPACK_ROW_MAJOR, ONE_NORM, n, a, LEADING_DIMENSION_A, aNorm, &rcond); // aNorm should be 35.019999999999996
// double work[4*N];
// int iwork[N];
// //info = LAPACKE_dgecon_work(LAPACK_ROW_MAJOR, ONE_NORM, n, a, LEADING_DIMENSION_A, aNorm, &rcond, work, iwork); // aNorm should be 35.019999999999996
// //dgecon_( &ONE_NORM, &n, a, &LEADING_DIMENSION_A, &aNorm, &rcond, work, iwork, &info );
// /* Check for the exact singularity */
// if (info == 0)
// {
// printf("LAPACKE_dgecon completed SUCCESSFULLY...\n");
// }
// else if ( info < 0 )
// {
// printf( "Element %d of A had an illegal value\n", -info );
// exit( 1 );
// }
// else
// {
// printf( "Unrecognized value of INFO = %d\n", info );
// exit( 1 );
// }
// /* Print solution */
// printf("LAPACKE_dlange / One-norm of A = %lf\n", aNorm);
// printf("LAPACKE_dgecon / RCOND of A = %f\n", rcond);
// }
/* Dot product of two 3D points treated as vectors. */
double calcDot_c(Point p1, Point p2)
{
    double acc = p1.x * p2.x;
    acc += p1.y * p2.y;
    acc += p1.z * p2.z;
    return acc;
}
/* Component-wise sum of two 3D points. */
Point calcAdd_c(Point p1, Point p2)
{
    Point sum;
    sum.z = p1.z + p2.z;
    sum.y = p1.y + p2.y;
    sum.x = p1.x + p2.x;
    return sum;
}
/* Scale a 3D point by a scalar factor. */
Point calcMult_c(double a,Point p1)
{
    Point scaled;
    scaled.z = a * p1.z;
    scaled.y = a * p1.y;
    scaled.x = a * p1.x;
    return scaled;
}
// FIXME: change the type into bool or char
/*
 * Test whether the segment ctrPoint1 -> ctrPoint2 is blocked by rectangle B
 * (center ctrPointB, plane normal normalVectB, corners vertsB[4], area areaB).
 * Returns 1 if blocked, 0 otherwise (double return kept for API compatibility,
 * see FIXME above).
 *
 * Method: intersect the segment with B's plane.  If the intersection lies
 * strictly between the endpoints, split the rectangle into 4 triangles that
 * share the intersection point; their summed area equals areaB exactly (up to
 * `verysmall`) only when the point is inside the rectangle.
 */
double checkBlockage_c(Point ctrPoint1, Point ctrPoint2, Point ctrPointB,
    Point normalVectB, Point vertsB[4], const double areaB)
{
    const double verysmall = 1e-6;
    int i;
    // FIXME: static, only handle rect, update for future release
    Point verts[5];     // rectangle corners, closed: verts[4] == verts[0]
    Point triVerts[4];  // one triangle, closed: triVerts[3] == triVerts[0]
    // Point *verts = malloc(5*sizeof(Point)); // rectangle
    // Point *triVerts = malloc(4*sizeof(Point)); // triangle
    for (i=0;i<4;i++)
    {
        verts[i] = vertsB[i];
        // printf("%f,%f,%f \n",vertsB[i].x,vertsB[i].y,vertsB[i].z);
    }
    verts[4] = vertsB[0];
    // for (int i=0;i<5;i++)
    //     printf("%f,%f,%f \n",verts[i].x,verts[i].y,verts[i].z);
    // double area = calcArea3DPoly_c(4,verts,normalVectB);
    // u: segment direction; w: from plane center to segment tail
    Point u,w;
    u = calcAdd_c(ctrPoint2,calcMult_c(-1,ctrPoint1));
    w = calcAdd_c(ctrPoint1,calcMult_c(-1,ctrPointB));
    // u.x = ctrPoint2.x-ctrPoint1.x;
    // u.y = ctrPoint2.y-ctrPoint1.y;
    // u.z = ctrPoint2.z-ctrPoint1.z;
    // w.x = ctrPoint1.x-ctrPointB.x;
    // w.y = ctrPoint1.y-ctrPointB.y;
    // w.z = ctrPoint1.z-ctrPointB.z;
    double dot = calcDot_c(normalVectB,u);
    // Sum of triangles' areas
    double totalarea = 0;
    // double area = 0; // Debug-purpose
    if (fabs(dot) < verysmall) // Line segment and the planeB is parallel
    {
        // printf("dot:%f\n",dot);
        // free(verts);
        // free(triVerts);
        return 0; // Not blocked
    }
    else
    {
        // Parametric value of the intersecting point
        double ti = -1*calcDot_c(normalVectB,w)/calcDot_c(normalVectB,u);
        // if(!(0 <= ti && ti <= 1) )
        if(!(0 < ti && ti < 1) )
        {
            /*
            This is when the intersecting point is not between plane1 and plane2.
            In addition, a strict inequality is needed to handle the case when
            the intersencting point comes from the same plane as the tail or head
            points.
            */
            // free(verts);
            // free(triVerts);
            return 0;
        }
        else
        {
            // The intersecting point
            Point intersectingPoint = calcAdd_c(ctrPoint1,calcMult_c(ti,u));
            // Calculate triangles' areas
            for(i=0;i<4;i++) // loop for 4 triangles
            {
                // Assign triangles' vertices (closed triangle required by
                // calcArea3DPoly_c, which reads V[n] == V[0])
                triVerts[0] = intersectingPoint;
                triVerts[1] = verts[i];
                triVerts[2] = verts[i+1];
                triVerts[3] = intersectingPoint;
                totalarea += calcArea3DPoly_c(3,triVerts,normalVectB);
                // Debug
                // area = calcArea3DPoly_c(3,triVerts,normalVectB);
                // totalarea += area;
                // printf("i:%d,area:%f\n",i,area);
            }
            // printf("totalarea:%f\n",i,totalarea);
            // free(verts);
            // free(triVerts);
            if(fabs(totalarea-areaB) < verysmall)
                return 1; // Blocked
            else
                return 0; // Not blocked
            // return totalarea;
        }
    }
}
// see: http://geomalgorithms.com/a01-_area.html
/*
 * Area of a planar 3D polygon with n vertices lying in a plane of normal N.
 *
 * PRECONDITION: V must hold n+1 points with V[n] == V[0] (closed polygon);
 * the wrap-around terms below read V[n] and V[n-1].
 *
 * Fix vs. the original: the accumulators were float, silently degrading the
 * double-precision result; they are now double.
 */
double calcArea3DPoly_c( int n, Point* V, Point N )
{
    double area = 0;
    double an, ax, ay, az; // abs value of normal and its coords
    int coord;             // coord to ignore: 1=x, 2=y, 3=z
    int i, j, k;           // loop indices

    if (n < 3) return 0;   // a degenerate polygon

    // select largest abs coordinate to ignore for projection
    ax = (N.x>0 ? N.x : -N.x); // abs x-coord
    ay = (N.y>0 ? N.y : -N.y); // abs y-coord
    az = (N.z>0 ? N.z : -N.z); // abs z-coord

    coord = 3; // ignore z-coord
    if (ax > ay) {
        if (ax > az) coord = 1; // ignore x-coord
    }
    else if (ay > az) coord = 2; // ignore y-coord

    // compute area of the 2D projection
    switch (coord) {
        case 1:
            for (i=1, j=2, k=0; i<n; i++, j++, k++)
                area += (V[i].y * (V[j].z - V[k].z));
            break;
        case 2:
            for (i=1, j=2, k=0; i<n; i++, j++, k++)
                area += (V[i].z * (V[j].x - V[k].x));
            break;
        case 3:
            for (i=1, j=2, k=0; i<n; i++, j++, k++)
                area += (V[i].x * (V[j].y - V[k].y));
            break;
    }
    switch (coord) { // wrap-around term (uses V[n] == V[0])
        case 1:
            area += (V[n].y * (V[1].z - V[n-1].z));
            break;
        case 2:
            area += (V[n].z * (V[1].x - V[n-1].x));
            break;
        case 3:
            area += (V[n].x * (V[1].y - V[n-1].y));
            break;
    }

    // scale to get area before projection
    an = sqrt( ax*ax + ay*ay + az*az); // length of normal vector
    switch (coord) {
        case 1:
            area *= (an / (2 * N.x));
            break;
        case 2:
            area *= (an / (2 * N.y));
            break;
        case 3:
            area *= (an / (2 * N.z));
            break;
    }
    return fabs(area); // absolute value: orientation-independent area
}
/* Angle (radians) between two 3D vectors, via the normalized dot product. */
double calcAngle_c( Point v1, Point v2 )
{
    double sq1 = v1.x * v1.x + v1.y * v1.y + v1.z * v1.z;
    double sq2 = v2.x * v2.x + v2.y * v2.y + v2.z * v2.z;
    double cosang = (v1.x * v2.x + v1.y * v2.y + v1.z * v2.z) / sqrt(sq1 * sq2);
    /* Clamp to [-1, 1] for numerical stability before acos. */
    if (cosang > 1.0)
        cosang = 1.0;
    else if (cosang < -1.0)
        cosang = -1.0;
    return acos(cosang);
}
// see:
// https://github.com/chtran/computer_vision/blob/master/proj3/code/vlfeat/vl/rodrigues.c
// https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
void calcRodriguesMtx_c(const double angle,const double* k_pt, double* R_pt)
{
#define k(i) k_pt[(i)]
#define R(i,j) R_pt[(i)+3*(j)]
const double verysmall = 1e-6;
double kx = k_pt[0];
double ky = k_pt[1];
double kz = k_pt[2];
double length_k = sqrt(kx*kx +
ky*ky +
kz*kz ) ;
if (length_k < verysmall)
{
R(0,0) = 1.0 ; R(0,1) = 0.0 ; R(0,2) = 0.0 ;
R(1,0) = 0.0 ; R(1,1) = 1.0 ; R(1,2) = 0.0 ;
R(2,0) = 0.0 ; R(2,1) = 0.0 ; R(2,2) = 1.0 ;
}
else
{
double x = kx / length_k ;
double y = ky / length_k ;
double z = kz / length_k ;
double xx = x*x ;
double xy = x*y ;
double xz = x*z ;
double yy = y*y ;
double yz = y*z ;
double zz = z*z ;
const double yx = xy ;
// const double zx = xz ;
// const double zy = yz ;
double sth = sin(-angle) ;
double cth = cos(-angle) ;
double mcth = 1.0 - cth ;
R(0,0) = 1 - mcth * (yy+zz) ;
R(1,0) = sth*z + mcth * xy ;
R(2,0) = - sth*y + mcth * xz ;
R(0,1) = - sth*z + mcth * yx ;
R(1,1) = 1 - mcth * (zz+xx) ;
R(2,1) = sth*x + mcth * yz ;
R(0,2) = sth*y + mcth * xz ;
R(1,2) = - sth*x + mcth * yz ;
R(2,2) = 1 - mcth * (xx+yy) ;
}
return;
}
/* Initialize the x/y/z components of an existing Point in place. */
void initPoint(Point *pt, double x, double y, double z)
{
    pt->x = x;
    pt->y = y;
    pt->z = z;
}
// JUNK
// void getPowerDelayMtxAll_c(int N,
// BareDetector collector,PointSource emitter,
// SimplePlane* planes, double* powMtx,double* delayMtx,
// double* powVect_t,double* delayVect_t,
// double* powVect_r,double* delayVect_r)
// {
// /*
// Refer to eq. (18).
// Assumptions:
// m =1
// FoV = pi/2
// Currently assume no blockage to test the speed.
// */
// #define isClose(a,b) fabs((a) - (b)) <= (1e-08 + 1e-05 * fabs(b))
// #define P(i,j) powMtx[(j)+N*(i)] // column-wise (row-major)
// #define D(i,j) delayMtx[(j)+N*(i)] // column-wise (row-major)
// const int speed_of_light = 299792458;
// int i;
// int k;
// #pragma omp parallel for
// for(i=0;i<N;i++)
// { // Iteration over the row (collector)
// // This is very important to define it inside the loop if you
// // want to use openmp
// char isVisible; // Need the smallest size. Hence using char
// double vartheta,psi;
// char isVisible_t; // Need the smallest size. Hence using char
// double vartheta_t,psi_t;
// char isVisible_r; // Need the smallest size. Hence using char
// double vartheta_r,psi_r;
// // For t_f
// isVisible_t = 1;
// vartheta_t = calcAngle_c(emitter.normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,emitter.ctrPoint)));
// psi_t = calcAngle_c(planes[i].normalVect,calcAdd_c(emitter.ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta_t>=0 && vartheta_t < M_PI/2) || !(psi_t>=0 && psi_t < M_PI/2))
// {
// isVisible_t = 0;
// }
// if (isClose(vartheta_t,M_PI/2) || isClose(psi_t,M_PI/2))
// {
// isVisible_t = 0;
// }
// // distance
// Point dist = calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,emitter.ctrPoint));
// double d_ki_square = calcDot_c(dist,dist);
// if(!isVisible_t)
// {
// powVect_t[i] = 0;
// delayVect_t[i] = 0;
// }
// else
// {
// powVect_t[i] = ((emitter.m+1)/(2*M_PI))*pow(cos(vartheta_t),emitter.m)*planes[i].area*cos(psi_t)/d_ki_square;
// delayVect_t[i] = sqrt(d_ki_square)/speed_of_light;
// }
// // End for t_f
// // For r_f
// isVisible_r = 1;
// vartheta_r = calcAngle_c(planes[i].normalVect,calcAdd_c(collector.ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// psi_r = calcAngle_c(collector.normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,collector.ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta_r>=0 && vartheta_r < M_PI/2) || !(psi_r>=0 && psi_r < collector.FoV))
// {
// isVisible_r = 0;
// }
// if (isClose(vartheta_r,M_PI/2) || isClose(psi_r,collector.FoV))
// {
// isVisible_r = 0;
// }
// // distance
// dist = calcAdd_c(collector.ctrPoint,calcMult_c(-1,planes[i].ctrPoint));
// d_ki_square = calcDot_c(dist,dist);
// if(!isVisible_r)
// {
// powVect_r[i] = 0;
// delayVect_r[i] = 0;
// }
// else
// {
// powVect_r[i] = (1/M_PI)*cos(vartheta_r)*collector.area*cos(psi_r)/d_ki_square;
// delayVect_r[i] = sqrt(d_ki_square)/speed_of_light;
// }
// // End for r_f
// for(k=0;k<N;k++)
// { // Iteration over the column (emitter)
// if(i!=k)
// {
// // Calculate visibility based on angles
// isVisible = 1;
// vartheta = calcAngle_c(planes[k].normalVect,calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,planes[k].ctrPoint)));
// psi = calcAngle_c(planes[i].normalVect,calcAdd_c(planes[k].ctrPoint,calcMult_c(-1,planes[i].ctrPoint)));
// // Visible only when 0 <= vartheta <= pi/2 and 0<= psi <= pi/2
// if (!(vartheta>=0 && vartheta < M_PI/2) || !(psi>=0 && psi < M_PI/2))
// {
// isVisible = 0;
// }
// if (isClose(vartheta,M_PI/2) || isClose(psi,M_PI/2))
// {
// isVisible = 0;
// }
// // distance
// dist = calcAdd_c(planes[i].ctrPoint,calcMult_c(-1,planes[k].ctrPoint));
// d_ki_square = calcDot_c(dist,dist);
// if(!isVisible)
// {
// P(i,k) = 0;
// D(i,k) = 0;
// }
// else
// {
// P(i,k) = (1/M_PI)*cos(vartheta)*planes[i].area*cos(psi)/d_ki_square;
// D(i,k) = sqrt(d_ki_square)/speed_of_light;
// }
// }
// else
// {
// P(i,k) = 0;
// D(i,k) = 0;
// }
// }
// }
// }
|
GB_unaryop__identity_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_uint16
// op(A') function: GB_tran__identity_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply: Cx [p] = (float) Ax [p] for all anz entries, in parallel.
GrB_Info GB_unop__identity_fp32_uint16
(
    float *Cx,              // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,            // number of entries to typecast
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (float) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast uint16 -> float, and apply the identity op; the actual
// work is generated by including the shared GB_unaryop_transpose.c template.
GrB_Info GB_tran__identity_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,   // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                        // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_fc64_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int8)
// op(A') function: GB (_unop_tran__identity_fc64_int8)
// C type: GxB_FC64_t
// A type: int8_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply: Cx [p] = GxB_CMPLX ((double) Ax [p], 0) for every entry, handling
// both the full/sparse case (Ab == NULL) and the bitmap case (Ab != NULL).
GrB_Info GB (_unop_apply__identity_fc64_int8)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, no typecast: a straight parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            int8_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast int8 -> FC64, and apply the identity op; the actual
// work is generated by including the shared GB_unop_transpose.c template.
GrB_Info GB (_unop_tran__identity_fc64_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // one workspace per thread/slice
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pi_omp_critical_6.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The is the original sequential program. It uses the timer
from the OpenMP runtime library
History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>
static long num_steps = 1024 * 1024 * 1024;
double step;
/* Numerically integrate 4/(1+x*x) over [0,1] (= pi) with 1, 2, 4, ... MAX_T
 * OpenMP threads, timing each run.  Partial sums are kept thread-private and
 * combined inside a critical section.
 * Fixes vs. the original: removed the unused outer `i` and `x` (they were
 * shadowed inside the parallel region) and added an explicit return. */
int main () {
    const int MAX_T = 16;        /* largest thread count tested */
    int t;
    double pi;
    double start_time, run_time;

    step = 1.0/(double) num_steps;

    for(t = 1; t <= MAX_T; t*=2) {
        start_time = omp_get_wtime();
        omp_set_num_threads(t);
        pi = 0.0;
        #pragma omp parallel
        {
            /* Thread-private loop state and partial sum. */
            int i, nt;
            double x, sum = 0;
            /* Cyclic work distribution: start at the thread id, stride by
               the team size. */
            i = omp_get_thread_num();
            nt = omp_get_num_threads();
            for (; i < num_steps; i += nt){
                x = (i + 0.5) * step;
                sum += 4.0/(1.0+x*x);
            }
            /* Serialize the per-thread partial sums into the shared total. */
            #pragma omp critical
            pi += sum;
        }
        pi = pi * step;
        run_time = omp_get_wtime() - start_time;
        printf("pi with %d threads: %.16lf in %lf seconds\n",t , pi,run_time);
    }
    return 0;
}
|
nmt_master.c | #include "config.h"
#include "utils.h"
/* Mask the input maps and transform them to harmonic coefficients.
 * If the field requests E/B purification, delegate to nmt_purify; otherwise
 * multiply each map by the mask and run a spherical harmonic transform.
 * NOTE: maps_in appears to be overwritten in place in both branches (it is
 * passed as both input and output) -- confirm against he_map_product /
 * nmt_purify signatures before relying on its contents afterwards. */
static void purify_generic(nmt_field *fl,flouble *mask,fcomplex **walm0,
                           flouble **maps_in,fcomplex **alms_out,int niter)
{
  if(fl->pure_b || fl->pure_e) {
    nmt_purify(fl,mask,walm0,maps_in,maps_in,alms_out,niter);
  }
  else {
    int im1;
    for(im1=0;im1<fl->nmaps;im1++)
      he_map_product(fl->cs,maps_in[im1],mask,maps_in[im1]);
    he_map2alm(fl->cs,fl->lmax,1,fl->spin,maps_in,alms_out,niter);
  }
}
/* Deep-copy the binning scheme `bin` into w->bin so the workspace owns an
 * independent copy (per-band ell lists, weights and f_ell factors included). */
static void nmt_workspace_store_bins(nmt_workspace *w,
                                     nmt_binning_scheme *bin)
{
  int ii;
  w->bin=my_malloc(sizeof(nmt_binning_scheme));
  w->bin->n_bands=bin->n_bands;
  w->bin->nell_list=my_malloc(w->bin->n_bands*sizeof(int));
  memcpy(w->bin->nell_list,bin->nell_list,w->bin->n_bands*sizeof(int));
  w->bin->ell_list=my_malloc(w->bin->n_bands*sizeof(int *));
  w->bin->w_list=my_malloc(w->bin->n_bands*sizeof(flouble *));
  w->bin->f_ell=my_malloc(w->bin->n_bands*sizeof(flouble *));
  for(ii=0;ii<w->bin->n_bands;ii++) {
    /* per-band arrays sized by the number of multipoles in the band */
    w->bin->ell_list[ii]=my_malloc(w->bin->nell_list[ii]*sizeof(int));
    w->bin->w_list[ii]=my_malloc(w->bin->nell_list[ii]*sizeof(flouble));
    w->bin->f_ell[ii]=my_malloc(w->bin->nell_list[ii]*sizeof(flouble));
    memcpy(w->bin->ell_list[ii],bin->ell_list[ii],w->bin->nell_list[ii]*sizeof(int));
    memcpy(w->bin->w_list[ii],bin->w_list[ii],w->bin->nell_list[ii]*sizeof(flouble));
    memcpy(w->bin->f_ell[ii],bin->f_ell[ii],w->bin->nell_list[ii]*sizeof(flouble));
  }
  w->bin->ell_max=bin->ell_max;
}
/* Allocate and initialize a workspace: copies the sky info and binning,
 * zero-initializes the (ncls*(lmax+1))^2 unbinned coupling matrix, and
 * allocates the binned matrix plus its LU permutation.
 * Caller owns the result; free with nmt_workspace_free. */
static nmt_workspace *nmt_workspace_new(nmt_curvedsky_info *cs,int ncls,
                                        nmt_binning_scheme *bin,int is_teb,
                                        int lmax_fields,int lmax_mask)
{
  int ii;
  nmt_workspace *w=my_malloc(sizeof(nmt_workspace));
  w->lmax=bin->ell_max;
  w->lmax_fields=lmax_fields;
  w->lmax_mask=lmax_mask;
  w->is_teb=is_teb;
  w->ncls=ncls;
  w->cs=nmt_curvedsky_info_copy(cs);
  w->pcl_masks=my_malloc((w->lmax_mask+1)*sizeof(flouble));
  w->beam_prod=my_malloc((w->lmax_fields+1)*sizeof(flouble));
  /* unbinned coupling matrix: ncls*(lmax+1) rows, zero-filled */
  w->coupling_matrix_unbinned=my_malloc(w->ncls*(w->lmax+1)*sizeof(flouble *));
  for(ii=0;ii<w->ncls*(w->lmax+1);ii++)
    w->coupling_matrix_unbinned[ii]=my_calloc(w->ncls*(w->lmax+1),sizeof(flouble));
  nmt_workspace_store_bins(w,bin);
  w->coupling_matrix_binned=gsl_matrix_alloc(w->ncls*w->bin->n_bands,w->ncls*w->bin->n_bands);
  w->coupling_matrix_perm=gsl_permutation_alloc(w->ncls*w->bin->n_bands);
  return w;
}
/* Release everything owned by a workspace created with nmt_workspace_new,
 * including the deep-copied binning and all matrix rows, then w itself. */
void nmt_workspace_free(nmt_workspace *w)
{
  int ii;
  free(w->cs);
  gsl_permutation_free(w->coupling_matrix_perm);
  gsl_matrix_free(w->coupling_matrix_binned);
  nmt_bins_free(w->bin);
  for(ii=0;ii<w->ncls*(w->lmax+1);ii++)
    free(w->coupling_matrix_unbinned[ii]);
  free(w->coupling_matrix_unbinned);
  free(w->beam_prod);
  free(w->pcl_masks);
  free(w);
}
/* Bin the unbinned coupling matrix into bandpowers, weighting each (l2,l3)
 * element by the beam product, the bandpower weights and the f_ell ratio,
 * then LU-decompose the binned matrix in place (for later inversion). */
static void bin_coupling_matrix(nmt_workspace *w)
{
  int icl_a,icl_b,ib2,ib3,l2,l3,i2,i3,sig;
  for(icl_a=0;icl_a<w->ncls;icl_a++) {
    for(icl_b=0;icl_b<w->ncls;icl_b++) {
      for(ib2=0;ib2<w->bin->n_bands;ib2++) {
        for(ib3=0;ib3<w->bin->n_bands;ib3++) {
          /* accumulate over all multipole pairs of the two bands */
          double coupling_b=0;
          for(i2=0;i2<w->bin->nell_list[ib2];i2++) {
            l2=w->bin->ell_list[ib2][i2];
            for(i3=0;i3<w->bin->nell_list[ib3];i3++) {
              l3=w->bin->ell_list[ib3][i3];
              coupling_b+=w->coupling_matrix_unbinned[w->ncls*l2+icl_a][w->ncls*l3+icl_b]*
                w->beam_prod[l3]*w->bin->w_list[ib2][i2]*w->bin->f_ell[ib2][i2]/w->bin->f_ell[ib3][i3];
            }
          }
          gsl_matrix_set(w->coupling_matrix_binned,w->ncls*ib2+icl_a,w->ncls*ib3+icl_b,coupling_b);
        }
      }
    }
  }
  gsl_linalg_LU_decomp(w->coupling_matrix_binned,w->coupling_matrix_perm,&sig);
}
/* Replace the unbinned coupling matrix with a user-supplied row-major
 * n_rows x n_rows matrix and recompute the binned/LU-decomposed version.
 * Errors out if n_rows != ncls*(lmax+1). */
void nmt_update_coupling_matrix(nmt_workspace *w,int n_rows,double *new_matrix)
{
  int ii;
  if(n_rows!=w->ncls*(w->lmax+1)) {
    report_error(NMT_ERROR_INCONSISTENT,"Input matrix has the wrong size. Expected %d, got %d\n",
                 w->ncls*(w->lmax+1),n_rows);
  }
  for(ii=0;ii<n_rows;ii++)
    memcpy(w->coupling_matrix_unbinned[ii],&(new_matrix[ii*n_rows]),n_rows*sizeof(flouble));
  bin_coupling_matrix(w);
}
/* Replace the beam product b1[l]*b2[l] stored in the workspace and rebin the
 * coupling matrix.  Both input beams must cover at least lmax_fields+1
 * multipoles (nl1, nl2 > lmax_fields), otherwise an error is reported. */
void nmt_workspace_update_beams(nmt_workspace *w,
                                int nl1,double *b1,
                                int nl2,double *b2)
{
  if((nl1<=w->lmax_fields) || (nl2<=w->lmax_fields)) {
    report_error(NMT_ERROR_INCONSISTENT,
                 "New beams are not large enough\n");
  }

  int ii;
  for(ii=0;ii<=w->lmax_fields;ii++)
    w->beam_prod[ii]=b1[ii]*b2[ii];

  //Recompute the binned coupling matrix
  bin_coupling_matrix(w);
}
/* Swap in a new binning scheme (which must share the workspace's ell_max),
 * reallocate the binned coupling matrix and permutation to the new number of
 * bands, and rebin. */
void nmt_workspace_update_binning(nmt_workspace *w,
                                  nmt_binning_scheme *bin)
{
  if(bin->ell_max!=w->bin->ell_max) {
    report_error(NMT_ERROR_INCONSISTENT,
                 "New bins must have the same ell_max\n");
  }

  //Store new bins
  nmt_bins_free(w->bin);
  nmt_workspace_store_bins(w,bin);

  //Rebin matrix (sizes depend on n_bands, so reallocate)
  gsl_matrix_free(w->coupling_matrix_binned);
  gsl_permutation_free(w->coupling_matrix_perm);
  w->coupling_matrix_binned=gsl_matrix_alloc(w->ncls*w->bin->n_bands,w->ncls*w->bin->n_bands);
  w->coupling_matrix_perm=gsl_permutation_alloc(w->ncls*w->bin->n_bands);
  bin_coupling_matrix(w);
}
/* Wrap an index into [0, lmaxp1), assuming it is off by at most one period. */
static int toeplitz_wrap(int il, int lmaxp1)
{
  int wrapped = il;
  if (wrapped < 0)
    wrapped += lmaxp1;
  else if (wrapped >= lmaxp1)
    wrapped -= lmaxp1;
  return wrapped;
}
/* Last multipole column to compute for row l2 under the Toeplitz
 * approximation: full row up to lmax in the exact region (l2 <= l_exact),
 * a band of width dl_band in the intermediate region (l2 <= l_toeplitz),
 * and just the diagonal beyond that.  l_toeplitz <= 0 disables the
 * approximation (full row).  Result is clamped to lmax.
 * Fix vs. the original: the clamp used fmin(), a needless int->double->int
 * round-trip; it is now a plain integer min. */
static int lend_toeplitz(int l2, int l_toeplitz, int l_exact, int dl_band, int lmax)
{
  int l_end;
  if(l_toeplitz > 0) {
    if(l2<=l_exact)
      l_end = lmax;           /* exact region: full row */
    else if(l2<=l_toeplitz)
      l_end = l2+dl_band;     /* band region: diagonal band only */
    else
      l_end = l2;             /* Toeplitz region: diagonal only */
  }
  else
    l_end = lmax;             /* approximation disabled */
  return l_end < lmax ? l_end : lmax;
}
static void populate_toeplitz(nmt_master_calculator *c, flouble **pcl_masks, int lt)
{
int ic,l2;
double ***tplz_00, ***tplz_0s, ***tplz_pp, ***tplz_mm;
if(c->has_00) {
tplz_00=my_malloc(c->npcl*sizeof(flouble **));
for(ic=0;ic<c->npcl;ic++) {
tplz_00[ic]=my_malloc(2*sizeof(flouble *));
tplz_00[ic][0]=my_calloc((c->lmax+1),sizeof(flouble));
tplz_00[ic][1]=my_calloc((c->lmax+1),sizeof(flouble));
}
}
if(c->has_0s) {
tplz_0s=my_malloc(c->npcl*sizeof(flouble **));
for(ic=0;ic<c->npcl;ic++) {
tplz_0s[ic]=my_malloc(2*sizeof(flouble *));
tplz_0s[ic][0]=my_calloc((c->lmax+1),sizeof(flouble));
tplz_0s[ic][1]=my_calloc((c->lmax+1),sizeof(flouble));
}
}
if(c->has_ss) {
tplz_pp=my_malloc(c->npcl*sizeof(flouble **));
tplz_mm=my_malloc(c->npcl*sizeof(flouble **));
for(ic=0;ic<c->npcl;ic++) {
tplz_pp[ic]=my_malloc(2*sizeof(flouble *));
tplz_pp[ic][0]=my_calloc((c->lmax+1),sizeof(flouble));
tplz_pp[ic][1]=my_calloc((c->lmax+1),sizeof(flouble));
tplz_mm[ic]=my_malloc(2*sizeof(flouble *));
tplz_mm[ic][0]=my_calloc((c->lmax+1),sizeof(flouble));
tplz_mm[ic][1]=my_calloc((c->lmax+1),sizeof(flouble));
}
}
int lstart=0;
int max_spin=NMT_MAX(c->s1, c->s2);
int has_ss2=(c->s1!=0) && (c->s2!=0) && (c->s1!=c->s2);
if(!(c->has_00))
lstart=max_spin;
#pragma omp parallel default(none) \
shared(c, lstart, pcl_masks, lt, has_ss2) \
shared(tplz_00,tplz_0s,tplz_pp,tplz_mm)
{
int il3,ll2,icc;
int l3_list[2];
double *wigner_00=NULL,*wigner_ss1=NULL,*wigner_ss2=NULL;
if(c->has_00 || c->has_0s)
wigner_00=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
if(c->has_0s || c->has_ss)
wigner_ss1=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
if(has_ss2)
wigner_ss2=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
else
wigner_ss2=wigner_ss1;
l3_list[0]=0;
l3_list[1]=lt;
#pragma omp for schedule(dynamic)
for(ll2=lstart;ll2<=c->lmax;ll2++) {
l3_list[0]=ll2; //Diagonal first, then column
for(il3=0;il3<2;il3++) {
int ll3=l3_list[il3];
int jj,l1,lmin_here,lmax_here;
int lmin_00=0,lmax_00=2*(c->lmax_mask+1)+1;
int lmin_ss1=0,lmax_ss1=2*(c->lmax_mask+1)+1;
int lmin_ss2=0,lmax_ss2=2*(c->lmax_mask+1)+1;
int lmin_12=0,lmax_12=2*(c->lmax_mask+1)+1;
int lmin_02=0,lmax_02=2*(c->lmax_mask+1)+1;
lmin_here=abs(ll2-ll3);
lmax_here=ll2+ll3;
if(c->has_00 || c->has_0s)
drc3jj(ll2,ll3,0,0,&lmin_00,&lmax_00,wigner_00,2*(c->lmax_mask+1));
if(c->has_0s || c->has_ss)
drc3jj(ll2,ll3,c->s1,-c->s1,&lmin_ss1,&lmax_ss1,wigner_ss1,2*(c->lmax_mask+1));
if(has_ss2)
drc3jj(ll2,ll3,c->s2,-c->s2,&lmin_ss2,&lmax_ss2,wigner_ss2,2*(c->lmax_mask+1));
else {
lmin_ss2=lmin_ss1;
lmax_ss2=lmax_ss1;
}
for(l1=lmin_here;l1<=lmax_here;l1++) {
int ipp;
if(l1<=c->lmax_mask) {
flouble wfac;
flouble w00=0,wss1=0,wss2=0,w12=0,w02=0;
int j00=l1-lmin_00;
int jss1=l1-lmin_ss1;
int jss2=l1-lmin_ss2;
if(c->has_00 || c->has_0s)
w00=j00 < 0 ? 0 : wigner_00[j00];
if(c->has_ss || c->has_0s) {
wss1=jss1 < 0 ? 0 : wigner_ss1[jss1];
wss2=jss2 < 0 ? 0 : wigner_ss2[jss2];
}
for(icc=0;icc<c->npcl;icc++) {
double *pcl=pcl_masks[icc];
if(c->has_00) {
wfac=pcl[l1]*w00*w00;
tplz_00[icc][il3][ll2]+=wfac;
}
if(c->has_0s) {
wfac=pcl[l1]*wss1*w00;
tplz_0s[icc][il3][ll2]+=wfac;
}
if(c->has_ss) {
int suml=l1+ll2+ll3;
wfac=pcl[l1]*wss1*wss2;
if(suml & 1) //Odd sum
tplz_mm[icc][il3][ll2]+=wfac;
else
tplz_pp[icc][il3][ll2]+=wfac;
}
}
}
}
}
} //end omp for
free(wigner_00);
free(wigner_ss1);
if(has_ss2)
free(wigner_ss2);
} //end omp parallel
for(ic=0;ic<c->npcl;ic++) {
//Take absolute value of the diagonal to avoid sqrt(-1) later
for(l2=0;l2<=c->lmax;l2++) {
if(c->has_00)
tplz_00[ic][0][l2]=fabs(tplz_00[ic][0][l2]);
if(c->has_0s)
tplz_0s[ic][0][l2]=fabs(tplz_0s[ic][0][l2]);
if(c->has_ss) {
tplz_pp[ic][0][l2]=fabs(tplz_pp[ic][0][l2]);
tplz_mm[ic][0][l2]=fabs(tplz_mm[ic][0][l2]);
}
}
//Compute column correlation coefficient
for(l2=0;l2<=c->lmax;l2++) {
double d1,d2;
if(c->has_00) {
d1=tplz_00[ic][0][l2];
d2=tplz_00[ic][0][lt];
if((d1>0) && (d2>0))
tplz_00[ic][1][l2]=tplz_00[ic][1][l2]/sqrt(d1*d2);
else
tplz_00[ic][1][l2]=0;
}
if(c->has_0s) {
d1=tplz_0s[ic][0][l2];
d2=tplz_0s[ic][0][lt];
if((d1>0) && (d2>0))
tplz_0s[ic][1][l2]=tplz_0s[ic][1][l2]/sqrt(d1*d2);
else
tplz_0s[ic][1][l2]=0;
}
if(c->has_ss) {
d1=tplz_pp[ic][0][l2];
d2=tplz_pp[ic][0][lt];
if((d1>0) && (d2>0))
tplz_pp[ic][1][l2]=tplz_pp[ic][1][l2]/sqrt(d1*d2);
else
tplz_pp[ic][1][l2]=0;
d1=tplz_mm[ic][0][l2];
d2=tplz_mm[ic][0][lt];
if((d1>0) && (d2>0))
tplz_mm[ic][1][l2]=tplz_mm[ic][1][l2]/sqrt(d1*d2);
else
tplz_mm[ic][1][l2]=0;
}
}
//Populate matrices
#pragma omp parallel default(none) \
shared(c, ic, lt, tplz_00, tplz_0s, tplz_pp, tplz_mm)
{
int ll2, ll3;
#pragma omp for schedule(dynamic)
for(ll2=0;ll2<=c->lmax;ll2++) {
for(ll3=0;ll3<=ll2;ll3++) {
int il=toeplitz_wrap(ll2+lt-ll3,c->lmax+1);
if(c->has_00)
c->xi_00[ic][ll2][ll3]=tplz_00[ic][1][il]*sqrt(tplz_00[ic][0][ll2]*tplz_00[ic][0][ll3]);
if(c->has_0s)
c->xi_0s[ic][0][ll2][ll3]=tplz_0s[ic][1][il]*sqrt(tplz_0s[ic][0][ll2]*tplz_0s[ic][0][ll3]);
if(c->has_ss) {
c->xi_pp[ic][0][ll2][ll3]=tplz_pp[ic][1][il]*sqrt(tplz_pp[ic][0][ll2]*tplz_pp[ic][0][ll3]);
c->xi_mm[ic][0][ll2][ll3]=tplz_mm[ic][1][il]*sqrt(tplz_mm[ic][0][ll2]*tplz_mm[ic][0][ll3]);
}
if(ll3!=ll2) {
if(c->has_00)
c->xi_00[ic][ll3][ll2]=c->xi_00[ic][ll2][ll3];
if(c->has_0s)
c->xi_0s[ic][0][ll3][ll2]=c->xi_0s[ic][0][ll2][ll3];
if(c->has_ss) {
c->xi_pp[ic][0][ll3][ll2]=c->xi_pp[ic][0][ll2][ll3];
c->xi_mm[ic][0][ll3][ll2]=c->xi_mm[ic][0][ll2][ll3];
}
}
}
} //end omp for
} //end omp parallel
}
if(c->has_ss) {
for(ic=0;ic<c->npcl;ic++) {
free(tplz_pp[ic][0]);
free(tplz_pp[ic][1]);
free(tplz_pp[ic]);
free(tplz_mm[ic][0]);
free(tplz_mm[ic][1]);
free(tplz_mm[ic]);
}
free(tplz_pp);
free(tplz_mm);
}
if(c->has_0s) {
for(ic=0;ic<c->npcl;ic++) {
free(tplz_0s[ic][0]);
free(tplz_0s[ic][1]);
free(tplz_0s[ic]);
}
free(tplz_0s);
}
if(c->has_00) {
for(ic=0;ic<c->npcl;ic++) {
free(tplz_00[ic][0]);
free(tplz_00[ic][1]);
free(tplz_00[ic]);
}
free(tplz_00);
}
}
/* Computes the Wigner-3j-based coupling coefficients (xi_00, xi_0s, xi_pp,
 * xi_mm) needed to build MASTER mode-coupling matrices for fields of spins
 * s1 and s2.
 *  lmax       : maximum multipole of the fields.
 *  lmax_mask  : maximum multipole of the mask power spectra.
 *  npcl       : number of mask pseudo-spectra in pcl_masks.
 *  pcl_masks  : mask pseudo-Cls; the caller is expected to have applied the
 *               (2l+1)/(4pi) weighting (as done in nmt_compute_coupling_matrix).
 *  pure_*     : E/B purification flags for both fields.
 *  do_teb     : if non-zero, compute the 00, 0s and ss families jointly
 *               (T-E-B mode-coupling matrix).
 *  l_toeplitz, l_exact, dl_band : Toeplitz-approximation parameters; when
 *               l_toeplitz>0 only part of the matrix is computed exactly and
 *               the rest is filled approximately (populate_toeplitz + the
 *               rescaling block at the end).
 * Returns a newly allocated calculator; free with nmt_master_calculator_free.
 */
nmt_master_calculator *nmt_compute_master_coefficients(int lmax, int lmax_mask,
						       int npcl, flouble **pcl_masks,
						       int s1, int s2,
						       int pure_e1, int pure_b1,
						       int pure_e2, int pure_b2,
						       int do_teb, int l_toeplitz,
						       int l_exact, int dl_band)
{
  int ic, ip, ii;
  nmt_master_calculator *c=my_malloc(sizeof(nmt_master_calculator));
  c->pure_any=pure_e1 || pure_b1 || pure_e2 || pure_b2;
  c->npcl=npcl;
  c->lmax=lmax;
  c->lmax_mask=lmax_mask;
  c->pure_e1=pure_e1;
  c->pure_b1=pure_b1;
  c->pure_e2=pure_e2;
  c->pure_b2=pure_b2;
  c->has_00=0;
  c->has_0s=0;
  c->has_ss=0;
  c->xi_00=NULL;
  c->xi_0s=NULL;
  c->xi_pp=NULL;
  c->xi_mm=NULL;
  //With purification extra variants of each coefficient are stored
  //(indices [ipp] below); otherwise a single variant suffices.
  if(c->pure_any) {
    c->npure_0s=2;
    c->npure_ss=3;
  }
  else {
    c->npure_0s=1;
    c->npure_ss=1;
  }
  //Order the spins so that, if only one of them is zero, it goes second.
  if(s1==0) {
    if(s2==0) {
      c->s1=0; c->s2=0;
    }
    else {
      c->s1=s2; c->s2=0;
    }
  }
  else {
    c->s1=s1;
    c->s2=s2;
  }
  //Which coefficient families are needed for this spin combination
  if(do_teb) {
    c->has_00=1;
    c->has_0s=1;
    c->has_ss=1;
  }
  else {
    c->has_00 = (c->s1==0) && (c->s2==0);
    c->has_0s = ((c->s1==0) && (c->s2!=0)) || ((c->s1!=0) && (c->s2==0));
    c->has_ss = (c->s1!=0) && (c->s2!=0);
  }
  //Allocate zero-initialized (lmax+1)x(lmax+1) coefficient matrices,
  //one per mask spectrum (and per purification variant where relevant).
  if(c->has_00) {
    c->xi_00=my_malloc(c->npcl*sizeof(flouble **));
    for(ic=0;ic<c->npcl;ic++) {
      c->xi_00[ic]=my_malloc((c->lmax+1)*sizeof(flouble *));
      for(ii=0;ii<=c->lmax;ii++)
        c->xi_00[ic][ii]=my_calloc((c->lmax+1),sizeof(flouble));
    }
  }
  if(c->has_0s) {
    c->xi_0s=my_malloc(c->npcl*sizeof(flouble ***));
    for(ic=0;ic<c->npcl;ic++) {
      c->xi_0s[ic]=my_malloc(c->npure_0s*sizeof(flouble **));
      for(ip=0;ip<c->npure_0s;ip++) {
        c->xi_0s[ic][ip]=my_malloc((c->lmax+1)*sizeof(flouble *));
        for(ii=0;ii<=c->lmax;ii++)
          c->xi_0s[ic][ip][ii]=my_calloc((c->lmax+1),sizeof(flouble));
      }
    }
  }
  if(c->has_ss) {
    c->xi_pp=my_malloc(c->npcl*sizeof(flouble ***));
    c->xi_mm=my_malloc(c->npcl*sizeof(flouble ***));
    for(ic=0;ic<c->npcl;ic++) {
      c->xi_pp[ic]=my_malloc(c->npure_ss*sizeof(flouble **));
      c->xi_mm[ic]=my_malloc(c->npure_ss*sizeof(flouble **));
      for(ip=0;ip<c->npure_ss;ip++) {
        c->xi_pp[ic][ip]=my_malloc((c->lmax+1)*sizeof(flouble *));
        c->xi_mm[ic][ip]=my_malloc((c->lmax+1)*sizeof(flouble *));
        for(ii=0;ii<=c->lmax;ii++) {
          c->xi_pp[ic][ip][ii]=my_calloc((c->lmax+1),sizeof(flouble));
          c->xi_mm[ic][ip][ii]=my_calloc((c->lmax+1),sizeof(flouble));
        }
      }
    }
  }
  //Fill approximate entries first when using the Toeplitz trick; the
  //exactly-computed region is recomputed (and zeroed first) below.
  if(l_toeplitz>0)
    populate_toeplitz(c, pcl_masks, l_toeplitz);
  int lstart=0;
  int max_spin=NMT_MAX(c->s1, c->s2);
  //A second (s2,-s2) Wigner family is only needed when both spins are
  //non-zero and different (and not in T-E-B mode); otherwise the first
  //family is reused.
  int has_ss2=(c->s1!=0) && (c->s2!=0) && (!do_teb) && (c->s1!=c->s2);
  if(!(c->has_00))
    lstart=max_spin;  //spin-s fields have no multipoles below l=s
#pragma omp parallel default(none)		\
  shared(c, lstart, do_teb, pcl_masks, has_ss2)	\
  shared(l_toeplitz, l_exact, dl_band)
  {
    int ll2,ll3,icc;
    //Per-thread scratch arrays holding the Wigner-3j symbols for one (l2,l3)
    double *wigner_00=NULL,*wigner_ss1=NULL,*wigner_12=NULL,*wigner_02=NULL,*wigner_ss2=NULL;
    //NOTE(review): pe1/pe2/pb1/pb2 are not referenced anywhere below.
    int pe1=c->pure_e1,pe2=c->pure_e2,pb1=c->pure_b1,pb2=c->pure_b2;
    if(c->has_00 || c->has_0s)
      wigner_00=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
    if(c->has_0s || c->has_ss)
      wigner_ss1=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
    if(has_ss2)
      wigner_ss2=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
    else
      wigner_ss2=wigner_ss1;  //equal spins: alias the first family
    if(c->pure_any) {
      wigner_12=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
      wigner_02=my_malloc(2*(c->lmax_mask+1)*sizeof(double));
    }
#pragma omp for schedule(dynamic)
    for(ll2=lstart;ll2<=c->lmax;ll2++) {
      //Range of ll3 computed exactly (restricted when using Toeplitz approx.)
      int l3_end=lend_toeplitz(ll2, l_toeplitz, l_exact, dl_band, c->lmax);
      int l3_start=lstart;
      if(!(c->pure_any)) //We can use symmetry
        l3_start=ll2;
      for(ll3=l3_start;ll3<=l3_end;ll3++) {
        //NOTE(review): jj is declared but unused in this loop body.
        int jj,l1,lmin_here,lmax_here;
        //l-range of each Wigner family, overwritten by drc3jj below
        int lmin_00=0,lmax_00=2*(c->lmax_mask+1)+1;
        int lmin_ss1=0,lmax_ss1=2*(c->lmax_mask+1)+1;
        int lmin_ss2=0,lmax_ss2=2*(c->lmax_mask+1)+1;
        int lmin_12=0,lmax_12=2*(c->lmax_mask+1)+1;
        int lmin_02=0,lmax_02=2*(c->lmax_mask+1)+1;
        lmin_here=abs(ll2-ll3);  //triangle-inequality bounds on l1
        lmax_here=ll2+ll3;
        if(l_toeplitz > 0) {
          //Set all elements that will be recomputed to zero
          //(populate_toeplitz may have stored approximate values here)
          for(icc=0;icc<c->npcl;icc++) {
            if(c->has_00)
              c->xi_00[icc][ll2][ll3]=0;
            if(c->has_0s)
              c->xi_0s[icc][0][ll2][ll3]=0;
            if(c->has_ss) {
              c->xi_pp[icc][0][ll2][ll3]=0;
              c->xi_mm[icc][0][ll2][ll3]=0;
            }
          }
        }
        //Wigner-3j symbols for every l1 allowed with this (ll2,ll3),
        //for each required (m,-m) pair
        if(c->has_00 || c->has_0s)
          drc3jj(ll2,ll3,0,0,&lmin_00,&lmax_00,wigner_00,2*(c->lmax_mask+1));
        if(c->has_0s || c->has_ss)
          drc3jj(ll2,ll3,c->s1,-c->s1,&lmin_ss1,&lmax_ss1,wigner_ss1,2*(c->lmax_mask+1));
        if(has_ss2)
          drc3jj(ll2,ll3,c->s2,-c->s2,&lmin_ss2,&lmax_ss2,wigner_ss2,2*(c->lmax_mask+1));
        else {
          lmin_ss2=lmin_ss1;
          lmax_ss2=lmax_ss1;
        }
        if(c->pure_any) {
          drc3jj(ll2,ll3,1,-2,&lmin_12,&lmax_12,wigner_12,2*(c->lmax_mask+1));
          drc3jj(ll2,ll3,0,-2,&lmin_02,&lmax_02,wigner_02,2*(c->lmax_mask+1));
        }
        //Sum over l1 of the mask spectrum times products of Wigner symbols
        for(l1=lmin_here;l1<=lmax_here;l1++) {
          int ipp;
          if(l1<=c->lmax_mask) {
            flouble wfac,fac_12=0,fac_02=0;
            flouble w00=0,wss1=0,wss2=0,w12=0,w02=0;
            int j02,j12;
            //Offset of l1 inside each Wigner array; negative -> symbol is 0
            int j00=l1-lmin_00;
            int jss1=l1-lmin_ss1;
            int jss2=l1-lmin_ss2;
            if(c->has_00 || c->has_0s)
              w00=j00 < 0 ? 0 : wigner_00[j00];
            if(c->has_ss || c->has_0s) {
              wss1=jss1 < 0 ? 0 : wigner_ss1[jss1];
              wss2=jss2 < 0 ? 0 : wigner_ss2[jss2];
            }
            if(c->pure_any) {
              //Purification prefactors multiplying the (1,-2) and (0,-2)
              //Wigner symbols; TODO confirm exact convention against the
              //NaMaster purification paper.
              j12=l1-lmin_12;
              j02=l1-lmin_02;
              if(ll2>1.) {
                fac_12=2*sqrt((l1+1.)*(l1+0.)/((ll2+2)*(ll2-1.)));
                if(l1>1.)
                  fac_02=sqrt((l1+2.)*(l1+1.)*(l1+0.)*(l1-1.)/((ll2+2.)*(ll2+1.)*(ll2+0.)*(ll2-1.)));
                else
                  fac_02=0;
              }
              else {
                fac_12=0;
                fac_02=0;
              }
              if(j12<0) { //If out of range, w12 is just 0
                fac_12=0;
                j12=0;
              }
              if(j02<0) { //if out of range, w02 is just 0
                fac_02=0;
                j02=0;
              }
              w12=j12 < 0 ? 0 : wigner_12[j12];
              w02=j02 < 0 ? 0 : wigner_02[j02];
            }
            for(icc=0;icc<c->npcl;icc++) {
              double *pcl=pcl_masks[icc];
              if(c->has_00) {
                wfac=pcl[l1]*w00*w00;
                c->xi_00[icc][ll2][ll3]+=wfac;
              }
              if(c->has_0s) {
                //[0]: standard term; [1]: purified term (only when pure_any)
                double wfac_ispure[2];
                wfac_ispure[0]=wss1;
                wfac_ispure[0]*=pcl[l1]*w00;
                if(c->pure_any) {
                  wfac_ispure[1]=wss1+fac_12*w12+fac_02*w02;
                  wfac_ispure[1]*=pcl[l1]*w00;
                }
                for(ipp=0;ipp<c->npure_0s;ipp++)
                  c->xi_0s[icc][ipp][ll2][ll3]+=wfac_ispure[ipp];
              }
              if(c->has_ss) {
                double wfac_ispure[3];
                int suml=l1+ll2+ll3;
                wfac_ispure[0]=wss1;
                wfac_ispure[0]*=wss2*pcl[l1];
                if(c->pure_any) {
                  wfac_ispure[1]=wss1+fac_12*w12+fac_02*w02;
                  wfac_ispure[2]=wfac_ispure[1]*wfac_ispure[1]*pcl[l1];
                  wfac_ispure[1]*=wss2*pcl[l1];
                }
                //Parity of l1+l2+l3 splits the sum into the -/+ coefficients
                if(suml & 1) { //Odd sum
                  for(ipp=0;ipp<c->npure_ss;ipp++)
                    c->xi_mm[icc][ipp][ll2][ll3]+=wfac_ispure[ipp];
                }
                else {
                  for(ipp=0;ipp<c->npure_ss;ipp++)
                    c->xi_pp[icc][ipp][ll2][ll3]+=wfac_ispure[ipp];
                }
              }
            }
          }
        }
        //Mirror into the transposed element (only valid without purification)
        if((!(c->pure_any)) && (ll2 != ll3)) { //Can use symmetry
          for(icc=0;icc<c->npcl;icc++) {
            if(c->has_00)
              c->xi_00[icc][ll3][ll2]=c->xi_00[icc][ll2][ll3];
            if(c->has_0s)
              c->xi_0s[icc][0][ll3][ll2]=c->xi_0s[icc][0][ll2][ll3];
            if(c->has_ss) {
              c->xi_pp[icc][0][ll3][ll2]=c->xi_pp[icc][0][ll2][ll3];
              c->xi_mm[icc][0][ll3][ll2]=c->xi_mm[icc][0][ll2][ll3];
            }
          }
        }
      }
    } //end omp for
    //free(NULL) is a no-op, so the purification arrays need no guard
    free(wigner_00);
    free(wigner_ss1);
    if(has_ss2)
      free(wigner_ss2);
    free(wigner_12);
    free(wigner_02);
  } //end omp parallel
  // Fill out lower triangle
  //When using the Toeplitz approximation, reconstruct the remaining corner
  //of each matrix from exactly-computed entries, rescaled by the diagonal
  //(correlation-coefficient ansatz, cf. the tplz_* logic in populate_toeplitz).
  if(l_toeplitz > 0) {
    int l2, l3;
    for(l2=c->lmax+l_exact-l_toeplitz;l2<=c->lmax;l2++) {
      for(l3=l_exact;l3<=l2+l_toeplitz-c->lmax;l3++){
        flouble **mat;
        flouble m;
        int lx=l_exact+l2-l3;  //exactly-computed element on the same diagonal
        for(ic=0;ic<c->npcl;ic++) {
          if(c->has_00) {
            mat = c->xi_00[ic];
            m=mat[lx][l_exact]*sqrt(fabs(mat[l2][l2]*mat[l3][l3]/
                                         (mat[lx][lx]*mat[l_exact][l_exact])));
            mat[l2][l3]=m;
            mat[l3][l2]=m;
          }
          if(c->has_0s) {
            mat = c->xi_0s[ic][0];
            m=mat[lx][l_exact]*sqrt(fabs(mat[l2][l2]*mat[l3][l3]/
                                         (mat[lx][lx]*mat[l_exact][l_exact])));
            mat[l2][l3]=m;
            mat[l3][l2]=m;
          }
          if(c->has_ss) {
            mat = c->xi_pp[ic][0];
            m=mat[lx][l_exact]*sqrt(fabs(mat[l2][l2]*mat[l3][l3]/
                                         (mat[lx][lx]*mat[l_exact][l_exact])));
            mat[l2][l3]=m;
            mat[l3][l2]=m;
            mat = c->xi_mm[ic][0];
            m=mat[lx][l_exact]*sqrt(fabs(mat[l2][l2]*mat[l3][l3]/
                                         (mat[lx][lx]*mat[l_exact][l_exact])));
            mat[l2][l3]=m;
            mat[l3][l2]=m;
          }
        }
      }
    }
  }
  return c;
}
/* Releases every coefficient array owned by a calculator, then the
 * calculator itself. Arrays are only present when the corresponding
 * has_* flag was set at construction time, mirroring the allocation
 * pattern in nmt_compute_master_coefficients. */
void nmt_master_calculator_free(nmt_master_calculator *c)
{
  if(c->has_00) {
    for(int icl=0; icl<c->npcl; icl++) {
      for(int il=0; il<=c->lmax; il++)
        free(c->xi_00[icl][il]);
      free(c->xi_00[icl]);
    }
    free(c->xi_00);
  }
  if(c->has_0s) {
    for(int icl=0; icl<c->npcl; icl++) {
      for(int ipu=0; ipu<c->npure_0s; ipu++) {
        for(int il=0; il<=c->lmax; il++)
          free(c->xi_0s[icl][ipu][il]);
        free(c->xi_0s[icl][ipu]);
      }
      free(c->xi_0s[icl]);
    }
    free(c->xi_0s);
  }
  if(c->has_ss) {
    //xi_pp and xi_mm share the same shape; free them in lockstep
    for(int icl=0; icl<c->npcl; icl++) {
      for(int ipu=0; ipu<c->npure_ss; ipu++) {
        for(int il=0; il<=c->lmax; il++) {
          free(c->xi_pp[icl][ipu][il]);
          free(c->xi_mm[icl][ipu][il]);
        }
        free(c->xi_pp[icl][ipu]);
        free(c->xi_mm[icl][ipu]);
      }
      free(c->xi_pp[icl]);
      free(c->xi_mm[icl]);
    }
    free(c->xi_pp);
    free(c->xi_mm);
  }
  free(c);
}
//Computes the unbinned and binned mode-coupling matrices of two fields,
//returning them packed into a newly created nmt_workspace.
// fl1,fl2 (in)   : fields we're correlating
// bin (in)       : bandpower binning scheme
// is_teb (in)    : if non-zero, build the joint 7-spectrum T-E-B matrix
// niter (in)     : number of SHT iterations for the mask pseudo-Cl
// lmax_mask (in) : band limit used for the mask spectra
// l_toeplitz, l_exact, dl_band (in) : Toeplitz-approximation parameters
nmt_workspace *nmt_compute_coupling_matrix(nmt_field *fl1,nmt_field *fl2,
					   nmt_binning_scheme *bin,int is_teb,
					   int niter,int lmax_mask,
					   int l_toeplitz,int l_exact,int dl_band)
{
  int l2,lmax_large,lmax_fields;
  nmt_workspace *w;
  int n_cl=fl1->nmaps*fl2->nmaps;  //number of cross-spectra
  if(is_teb) {
    if(!((fl1->spin==0) && (fl2->spin!=0)))
      report_error(NMT_ERROR_INCONSISTENT,"For T-E-B MCM the first input field must be spin-0 and the second spin-!=0\n");
    n_cl=7;  //TT,TE,TB,EE,EB,BE,BB handled jointly
  }
  //Both fields must share pixelization and resolution
  if(!(nmt_diff_curvedsky_info(fl1->cs,fl2->cs)))
    report_error(NMT_ERROR_CONSISTENT_RESO,
                 "Can't correlate fields with different pixelizations"
                 " or resolutions\n");
  if(bin->ell_max>he_get_lmax(fl1->cs))
    report_error(NMT_ERROR_CONSISTENT_RESO,
                 "Requesting bandpowers for too high a "
                 "multipole given map resolution\n");
  lmax_fields=fl1->lmax; // ell_max for the maps
  lmax_large=lmax_fields; // ell_max for the masks
  if(lmax_mask>lmax_large)
    lmax_large=lmax_mask;
  w=nmt_workspace_new(fl1->cs,n_cl,bin,is_teb,
                      lmax_fields,lmax_large);
  //Product of both beams, used when coupling theory spectra
  for(l2=0;l2<=w->lmax_fields;l2++)
    w->beam_prod[l2]=fl1->beam[l2]*fl2->beam[l2];
  //Mask cross-pseudo-Cl, weighted by (2l+1)/(4pi) as expected by the
  //coupling-coefficient sums in nmt_compute_master_coefficients
  he_anafast(&(fl1->mask),&(fl2->mask),0,0,&(w->pcl_masks),fl1->cs,w->lmax_mask,niter);
  for(l2=0;l2<=w->lmax_mask;l2++)
    w->pcl_masks[l2]*=(2*l2+1.)/(4*M_PI);
  // Compute coupling coefficients
  nmt_master_calculator *c=nmt_compute_master_coefficients(w->lmax, w->lmax_mask,
                                                           1, &(w->pcl_masks),
                                                           fl1->spin, fl2->spin,
                                                           fl1->pure_e,fl1->pure_b,
                                                           fl2->pure_e,fl2->pure_b,
                                                           is_teb, l_toeplitz, l_exact, dl_band);
  // Apply coupling coefficients
#pragma omp parallel default(none)		\
  shared(w,fl1,fl2,c)
  {
    int ll2,ll3;
    //Purity flags select which xi variant feeds each matrix element
    int pe1=fl1->pure_e,pe2=fl2->pure_e,pb1=fl1->pure_b,pb2=fl2->pure_b;
    //Overall sign: -1 when s1+s2 is odd
    int sign_overall=1;
    if((fl1->spin+fl2->spin) & 1)
      sign_overall=-1;
#pragma omp for schedule(dynamic)
    for(ll2=0;ll2<=w->lmax;ll2++) {
      for(ll3=0;ll3<=w->lmax;ll3++) {
        double fac=(2*ll3+1.)*sign_overall;
        //Rows/columns are packed as ncls*l + spectrum_index
        if(w->ncls==1)
          w->coupling_matrix_unbinned[1*ll2+0][1*ll3+0]=fac*c->xi_00[0][ll2][ll3]; //TT,TT
        if(w->ncls==2) {
          w->coupling_matrix_unbinned[2*ll2+0][2*ll3+0]=fac*c->xi_0s[0][pe1+pe2][ll2][ll3]; //TE,TE
          w->coupling_matrix_unbinned[2*ll2+1][2*ll3+1]=fac*c->xi_0s[0][pb1+pb2][ll2][ll3]; //TB,TB
        }
        if(w->ncls==4) {
          w->coupling_matrix_unbinned[4*ll2+0][4*ll3+3]=fac*c->xi_mm[0][pe1+pe2][ll2][ll3]; //EE,BB
          w->coupling_matrix_unbinned[4*ll2+1][4*ll3+2]=-fac*c->xi_mm[0][pe1+pb2][ll2][ll3]; //EB,BE
          w->coupling_matrix_unbinned[4*ll2+2][4*ll3+1]=-fac*c->xi_mm[0][pb1+pe2][ll2][ll3]; //BE,EB
          w->coupling_matrix_unbinned[4*ll2+3][4*ll3+0]=fac*c->xi_mm[0][pb1+pb2][ll2][ll3]; //BB,EE
          w->coupling_matrix_unbinned[4*ll2+0][4*ll3+0]=fac*c->xi_pp[0][pe1+pe2][ll2][ll3]; //EE,EE
          w->coupling_matrix_unbinned[4*ll2+1][4*ll3+1]=fac*c->xi_pp[0][pe1+pb2][ll2][ll3]; //EB,EB
          w->coupling_matrix_unbinned[4*ll2+2][4*ll3+2]=fac*c->xi_pp[0][pb1+pe2][ll2][ll3]; //BE,BE
          w->coupling_matrix_unbinned[4*ll2+3][4*ll3+3]=fac*c->xi_pp[0][pb1+pb2][ll2][ll3]; //BB,BB
        }
        if(w->ncls==7) {
          //T-E-B: field 1 is spin-0, so only field-2 purity flags enter
          w->coupling_matrix_unbinned[7*ll2+0][7*ll3+0]=fac*c->xi_00[0][ll2][ll3]; //TT,TT
          w->coupling_matrix_unbinned[7*ll2+1][7*ll3+1]=fac*c->xi_0s[0][pe2][ll2][ll3]; //TE,TE
          w->coupling_matrix_unbinned[7*ll2+2][7*ll3+2]=fac*c->xi_0s[0][pb2][ll2][ll3]; //TB,TB
          w->coupling_matrix_unbinned[7*ll2+3][7*ll3+6]=fac*c->xi_mm[0][pe2+pe2][ll2][ll3]; //EE,BB
          w->coupling_matrix_unbinned[7*ll2+4][7*ll3+5]=-fac*c->xi_mm[0][pe2+pb2][ll2][ll3]; //EB,BE
          w->coupling_matrix_unbinned[7*ll2+5][7*ll3+4]=-fac*c->xi_mm[0][pb2+pe2][ll2][ll3]; //BE,EB
          w->coupling_matrix_unbinned[7*ll2+6][7*ll3+3]=fac*c->xi_mm[0][pb2+pb2][ll2][ll3]; //BB,EE
          w->coupling_matrix_unbinned[7*ll2+3][7*ll3+3]=fac*c->xi_pp[0][pe2+pe2][ll2][ll3]; //EE,EE
          w->coupling_matrix_unbinned[7*ll2+4][7*ll3+4]=fac*c->xi_pp[0][pe2+pb2][ll2][ll3]; //EB,EB
          w->coupling_matrix_unbinned[7*ll2+5][7*ll3+5]=fac*c->xi_pp[0][pb2+pe2][ll2][ll3]; //BE,BE
          w->coupling_matrix_unbinned[7*ll2+6][7*ll3+6]=fac*c->xi_pp[0][pb2+pb2][ll2][ll3]; //BB,BB
        }
      }
    } //end omp for
  } //end omp parallel
  nmt_master_calculator_free(c);
  bin_coupling_matrix(w);
  return w;
}
//Estimates the pseudo-Cl bias induced by contaminant deprojection in the
//presence of uncorrelated per-pixel noise.
// fl1 (in)      : field whose templates were deprojected
// map_var (in)  : per-pixel noise variance map (sigma^2)
// cl_bias (out) : bias spectra, fl1->nmaps^2 arrays of length lmax+1
// niter (in)    : number of SHT iterations
//With no templates the output is simply zeroed.
void nmt_compute_uncorr_noise_deprojection_bias(nmt_field *fl1,flouble *map_var,flouble **cl_bias,
						int niter)
{
  int ii;
  long ip;
  int nspec=fl1->nmaps*fl1->nmaps;
  int lmax=fl1->lmax;
  if(fl1->lite)
    report_error(NMT_ERROR_LITE,"No deprojection bias for lightweight fields!\n");
  //Initialize output to zero (also the final answer if ntemp==0)
  for(ii=0;ii<nspec;ii++) {
    for(ip=0;ip<=lmax;ip++)
      cl_bias[ii][ip]=0;
  }
  if(fl1->ntemp>0) {
    //Allocate dummy maps and alms
    flouble **map_dum=my_malloc(fl1->nmaps*sizeof(flouble *));
    fcomplex **alm_dum=my_malloc(fl1->nmaps*sizeof(fcomplex *));
    for(ii=0;ii<fl1->nmaps;ii++) {
      map_dum[ii]=my_malloc(fl1->npix*sizeof(flouble));
      alm_dum[ii]=my_malloc(he_nalms(fl1->lmax)*sizeof(fcomplex));
    }
    flouble **cl_dum;
    cl_dum=my_malloc(nspec*sizeof(flouble *));
    for(ii=0;ii<nspec;ii++)
      cl_dum[ii]=my_calloc((lmax+1),sizeof(flouble));
    int iti,itj,itp,itq,im1;
    //mat_prod[i*ntemp+j] accumulates Int[v^2*sigma^2*f^j*f^i]
    flouble *mat_prod=my_calloc(fl1->ntemp*fl1->ntemp,sizeof(flouble));
    //First-order (cross) term of the bias, looping over template pairs
    for(iti=0;iti<fl1->ntemp;iti++) {
      for(itj=0;itj<fl1->ntemp;itj++) {
        double nij=gsl_matrix_get(fl1->matrix_M,iti,itj);
        for(im1=0;im1<fl1->nmaps;im1++) {
          he_map_product(fl1->cs,fl1->temp[itj][im1],map_var,map_dum[im1]); //sigma^2*f^j
          he_map_product(fl1->cs,map_dum[im1],fl1->mask,map_dum[im1]); //v*sigma^2*f^j
          he_map_product(fl1->cs,map_dum[im1],fl1->mask,map_dum[im1]); //v^2*sigma^2*f^j
        }
        //Int[v^2*sigma^2*f^j*f^r]
        for(im1=0;im1<fl1->nmaps;im1++)
          mat_prod[iti*fl1->ntemp+itj]+=he_map_dot(fl1->cs,map_dum[im1],fl1->temp[iti][im1]);
        //SHT[v^2*sigma^2*f^j]
        he_map2alm(fl1->cs,fl1->lmax,1,fl1->spin,map_dum,alm_dum,niter);
        //Sum_m(SHT[v^2*sigma^2*f^j]*f^i)/(2l+1)
        he_alm2cl(alm_dum,fl1->a_temp[iti],fl1->spin,fl1->spin,cl_dum,lmax);
        for(im1=0;im1<nspec;im1++) {
          for(ip=0;ip<=lmax;ip++)
            cl_bias[im1][ip]-=2*cl_dum[im1][ip]*nij;
        }
      }
    }
    //Second-order term: template-template spectra weighted by M-matrix
    //elements and the mat_prod integrals accumulated above
    for(iti=0;iti<fl1->ntemp;iti++) {
      for(itp=0;itp<fl1->ntemp;itp++) {
        //Sum_m(f^i*f^p*)/(2l+1)
        he_alm2cl(fl1->a_temp[iti],fl1->a_temp[itp],fl1->spin,fl1->spin,cl_dum,lmax);
        for(itj=0;itj<fl1->ntemp;itj++) {
          double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
          for(itq=0;itq<fl1->ntemp;itq++) {
            double npq=gsl_matrix_get(fl1->matrix_M,itp,itq);
            for(im1=0;im1<nspec;im1++) {
              for(ip=0;ip<=lmax;ip++)
                cl_bias[im1][ip]+=cl_dum[im1][ip]*mat_prod[itj*fl1->ntemp+itq]*mij*npq;
            }
          }
        }
      }
    }
    free(mat_prod);
    for(ii=0;ii<fl1->nmaps;ii++) {
      free(map_dum[ii]);
      free(alm_dum[ii]);
    }
    free(map_dum);
    free(alm_dum);
    for(ii=0;ii<nspec;ii++)
      free(cl_dum[ii]);
    free(cl_dum);
  }
}
//Estimates the pseudo-Cl bias induced by contaminant deprojection for a
//pair of fields, given a set of proposal power spectra.
// fl1,fl2 (in)     : fields being correlated (must share pixelization)
// cl_proposal (in) : proposal spectra C^ab, nmaps1*nmaps2 arrays
// cl_bias (out)    : bias spectra, nmaps1*nmaps2 arrays of length lmax+1
// niter (in)       : number of SHT iterations
//The result sums up to three contributions: fl2-template terms,
//fl1-template terms, and a cross term present only when both fields
//carry templates.
void nmt_compute_deprojection_bias(nmt_field *fl1,nmt_field *fl2,
				   flouble **cl_proposal,flouble **cl_bias,int niter)
{
  int ii;
  flouble **cl_dum;
  long ip;
  int nspec=fl1->nmaps*fl2->nmaps;
  int lmax=fl1->lmax;
  if(fl1->lite || fl2->lite)
    report_error(NMT_ERROR_LITE,"No deprojection bias for lightweight fields!\n");
  if(!(nmt_diff_curvedsky_info(fl1->cs,fl2->cs)))
    report_error(NMT_ERROR_CONSISTENT_RESO,"Can't correlate fields with different pixelizations\n");
  cl_dum=my_malloc(nspec*sizeof(flouble *));
  for(ii=0;ii<nspec;ii++) {
    cl_dum[ii]=my_calloc((lmax+1),sizeof(flouble));
    for(ip=0;ip<=lmax;ip++)
      cl_bias[ii][ip]=0;
  }
  //TODO: some terms (e.g. C^ab*SHT[w*g^j]) could be precomputed
  //TODO: if fl1=fl2 F2=F3
  //Allocate dummy maps and alms
  flouble **map_1_dum=my_malloc(fl1->nmaps*sizeof(flouble *));
  fcomplex **alm_1_dum=my_malloc(fl1->nmaps*sizeof(fcomplex *));
  for(ii=0;ii<fl1->nmaps;ii++) {
    map_1_dum[ii]=my_malloc(fl1->npix*sizeof(flouble));
    alm_1_dum[ii]=my_malloc(he_nalms(fl1->lmax)*sizeof(fcomplex));
  }
  //NOTE(review): fl2's buffers are sized with fl1->npix/fl1->lmax; this is
  //safe only because both fields were checked to share a pixelization above.
  flouble **map_2_dum=my_malloc(fl2->nmaps*sizeof(flouble *));
  fcomplex **alm_2_dum=my_malloc(fl2->nmaps*sizeof(fcomplex *));
  for(ii=0;ii<fl2->nmaps;ii++) {
    map_2_dum[ii]=my_malloc(fl1->npix*sizeof(flouble));
    alm_2_dum[ii]=my_malloc(he_nalms(fl1->lmax)*sizeof(fcomplex));
  }
  //Contribution from fl2's templates g^j (v=fl1 mask, w=fl2 mask)
  if(fl2->ntemp>0) {
    int iti;
    for(iti=0;iti<fl2->ntemp;iti++) {
      int itj;
      for(itj=0;itj<fl2->ntemp;itj++) {
        int im1,im2;
        double nij=gsl_matrix_get(fl2->matrix_M,iti,itj);
        //w*g^j
        for(im2=0;im2<fl2->nmaps;im2++)
          he_map_product(fl2->cs,fl2->temp[itj][im2],fl2->mask,map_2_dum[im2]);
        //SHT[w*g^j]
        he_map2alm(fl2->cs,fl2->lmax,1,fl2->spin,map_2_dum,alm_2_dum,niter);
        //C^ab*SHT[w*g^j]
        for(im1=0;im1<fl1->nmaps;im1++) {
          he_zero_alm(fl1->lmax,alm_1_dum[im1]);
          for(im2=0;im2<fl2->nmaps;im2++)
            he_alter_alm(lmax,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal[im1*fl2->nmaps+im2],1);
        }
        //SHT^-1[C^ab*SHT[w*g^j]]
        he_alm2map(fl1->cs,fl1->lmax,1,fl1->spin,map_1_dum,alm_1_dum);
        //SHT[v*SHT^-1[C^ab*SHT[w*g^j]]]
        purify_generic(fl1,fl1->mask,fl1->a_mask,map_1_dum,alm_1_dum,niter);
        //Sum_m(SHT[v*SHT^-1[C^ab*SHT[w*g^j]]]*g^i*)/(2l+1)
        he_alm2cl(alm_1_dum,fl2->a_temp[iti],fl1->spin,fl2->spin,cl_dum,lmax);
        for(im1=0;im1<nspec;im1++) {
          for(ip=0;ip<=lmax;ip++)
            cl_bias[im1][ip]-=cl_dum[im1][ip]*nij;
        }
      }
    }
  }
  //Contribution from fl1's templates f^j (mirror of the block above)
  if(fl1->ntemp>0) {
    int iti;
    for(iti=0;iti<fl1->ntemp;iti++) {
      int itj;
      for(itj=0;itj<fl1->ntemp;itj++) {
        int im1,im2;
        double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
        //v*f^j
        for(im1=0;im1<fl1->nmaps;im1++)
          he_map_product(fl1->cs,fl1->temp[itj][im1],fl1->mask,map_1_dum[im1]);
        //SHT[v*f^j]
        he_map2alm(fl1->cs,fl1->lmax,1,fl1->spin,map_1_dum,alm_1_dum,niter);
        //C^abT*SHT[v*f^j]
        for(im2=0;im2<fl2->nmaps;im2++) {
          he_zero_alm(fl2->lmax,alm_2_dum[im2]);
          for(im1=0;im1<fl1->nmaps;im1++)
            he_alter_alm(lmax,-1.,alm_1_dum[im1],alm_2_dum[im2],cl_proposal[im1*fl2->nmaps+im2],1);
        }
        //SHT^-1[C^abT*SHT[v*f^j]]
        he_alm2map(fl2->cs,fl2->lmax,1,fl2->spin,map_2_dum,alm_2_dum);
        //SHT[w*SHT^-1[C^abT*SHT[v*f^j]]]
        purify_generic(fl2,fl2->mask,fl2->a_mask,map_2_dum,alm_2_dum,niter);
        //Sum_m(f^i*SHT[w*SHT^-1[C^abT*SHT[v*f^j]]]^*)/(2l+1)
        he_alm2cl(fl1->a_temp[iti],alm_2_dum,fl1->spin,fl2->spin,cl_dum,lmax);
        for(im1=0;im1<nspec;im1++) {
          for(ip=0;ip<=lmax;ip++)
            cl_bias[im1][ip]-=cl_dum[im1][ip]*mij;
        }
      }
    }
  }
  //Cross term, only present when both fields have templates
  if((fl1->ntemp>0) && (fl2->ntemp>0)) {
    int iti,itj,itp,itq,im1,im2;
    //mat_prod[j*ntemp2+q] = Int[f^jT*v*SHT^-1[C^ab*SHT[w*g^q]]]
    flouble *mat_prod=my_calloc(fl1->ntemp*fl2->ntemp,sizeof(flouble));
    for(itj=0;itj<fl1->ntemp;itj++) {
      for(itq=0;itq<fl2->ntemp;itq++) {
        //w*g^q
        for(im2=0;im2<fl2->nmaps;im2++)
          he_map_product(fl2->cs,fl2->temp[itq][im2],fl2->mask,map_2_dum[im2]);
        //SHT[w*g^q]
        he_map2alm(fl2->cs,fl2->lmax,1,fl2->spin,map_2_dum,alm_2_dum,niter);
        //C^ab*SHT[w*g^q]
        for(im1=0;im1<fl1->nmaps;im1++) {
          he_zero_alm(fl1->lmax,alm_1_dum[im1]);
          for(im2=0;im2<fl2->nmaps;im2++)
            he_alter_alm(lmax,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal[im1*fl2->nmaps+im2],1);
        }
        //SHT^-1[C^ab*SHT[w*g^q]]
        he_alm2map(fl1->cs,fl1->lmax,1,fl1->spin,map_1_dum,alm_1_dum);
        for(im1=0;im1<fl1->nmaps;im1++) {
          //v*SHT^-1[C^ab*SHT[w*g^q]]
          he_map_product(fl1->cs,map_1_dum[im1],fl1->mask,map_1_dum[im1]);
          //Int[f^jT*v*SHT^-1[C^ab*SHT[w*g^q]]]
          mat_prod[itj*fl2->ntemp+itq]+=he_map_dot(fl1->cs,map_1_dum[im1],fl1->temp[itj][im1]);
        }
      }
    }
    for(iti=0;iti<fl1->ntemp;iti++) {
      for(itp=0;itp<fl2->ntemp;itp++) {
        //Sum_m(f^i*g^p*)/(2l+1)
        he_alm2cl(fl1->a_temp[iti],fl2->a_temp[itp],fl1->spin,fl2->spin,cl_dum,lmax);
        for(itj=0;itj<fl1->ntemp;itj++) {
          double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
          for(itq=0;itq<fl2->ntemp;itq++) {
            double npq=gsl_matrix_get(fl2->matrix_M,itp,itq);
            for(im1=0;im1<nspec;im1++) {
              for(ip=0;ip<=lmax;ip++)
                cl_bias[im1][ip]+=cl_dum[im1][ip]*mat_prod[itj*fl2->ntemp+itq]*mij*npq;
            }
          }
        }
      }
    }
    free(mat_prod);
  }
  //Cleanup
  for(ii=0;ii<fl1->nmaps;ii++) {
    free(map_1_dum[ii]);
    free(alm_1_dum[ii]);
  }
  free(map_1_dum);
  free(alm_1_dum);
  for(ii=0;ii<fl2->nmaps;ii++) {
    free(map_2_dum[ii]);
    free(alm_2_dum[ii]);
  }
  free(map_2_dum);
  free(alm_2_dum);
  for(ii=0;ii<nspec;ii++)
    free(cl_dum[ii]);
  free(cl_dum);
}
/* Convolves a set of input power spectra with the unbinned mode-coupling
 * matrix (including the beam product), producing coupled pseudo-Cls.
 * Matrix rows/columns are packed as ncls*l + spectrum_index. */
void nmt_couple_cl_l(nmt_workspace *w,flouble **cl_in,flouble **cl_out)
{
  int nc=w->ncls;
  for(int ell=0; ell<=w->lmax; ell++) {
    for(int isp=0; isp<nc; isp++) {
      flouble *row=w->coupling_matrix_unbinned[nc*ell+isp];
      flouble acc=0;
      for(int ellp=0; ellp<=w->lmax; ellp++) {
        flouble bprod=w->beam_prod[ellp];
        for(int jsp=0; jsp<nc; jsp++)
          acc+=row[nc*ellp+jsp]*bprod*cl_in[jsp][ellp];
      }
      cl_out[isp][ell]=acc;
    }
  }
}
//Computes the bandpower window functions: for each bandpower, the weight
//with which every unbinned multipole contributes after binning, mode
//decoupling and beam deconvolution.
// w (in)            : workspace holding the (un)binned coupling matrices
// bpw_win_out (out) : flattened [ncls*n_bands] x [ncls*(lmax+1)] array
//Fix: removed an unused `bpws` buffer that was malloc'd and never used
//or freed (memory leak).
void nmt_compute_bandpower_windows(nmt_workspace *w,double *bpw_win_out)
{
  // Bin mode-coupling matrix
  gsl_matrix *mat_coupled_bin=gsl_matrix_calloc(w->ncls*w->bin->n_bands,
                                                w->ncls*(w->lmax+1));
  int icl1;
  for(icl1=0;icl1<w->ncls;icl1++) {
    int ib1;
    for(ib1=0;ib1<w->bin->n_bands;ib1++) {
      int i1;
      int index_b1=w->ncls*ib1+icl1;
      for(i1=0;i1<w->bin->nell_list[ib1];i1++) {
        int icl2;
        int l1=w->bin->ell_list[ib1][i1];
        int index_1=w->ncls*l1+icl1;
        //Binning weight of multipole l1 within band ib1
        double wf=w->bin->f_ell[ib1][i1]*w->bin->w_list[ib1][i1];
        double *matrix_row=w->coupling_matrix_unbinned[index_1];
        for(icl2=0;icl2<w->ncls;icl2++) {
          int l2;
          for(l2=0;l2<=w->lmax;l2++) {
            int index_2=w->ncls*l2+icl2;
            double beamprod=w->beam_prod[l2];
            double m0=gsl_matrix_get(mat_coupled_bin,
                                     index_b1,index_2);
            gsl_matrix_set(mat_coupled_bin,index_b1,index_2,
                           m0+matrix_row[index_2]*beamprod*wf);
          }
        }
      }
    }
  }
  gsl_matrix *inv_mcm=gsl_matrix_alloc(w->ncls*w->bin->n_bands,
                                       w->ncls*w->bin->n_bands);
  gsl_matrix *bpw_win=gsl_matrix_calloc(w->ncls*w->bin->n_bands,
                                        w->ncls*(w->lmax+1));
  //Inverse binned MCM (from its precomputed LU decomposition)
  gsl_linalg_LU_invert(w->coupling_matrix_binned,
                       w->coupling_matrix_perm,
                       inv_mcm);
  //M^-1 * M
  gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1,inv_mcm,mat_coupled_bin,0,bpw_win);
  //Flatten into the caller-provided output buffer
  for(icl1=0;icl1<w->ncls;icl1++) {
    int ib1;
    for(ib1=0;ib1<w->bin->n_bands;ib1++) {
      int icl2;
      int index_1=w->ncls*ib1+icl1;
      for(icl2=0;icl2<w->ncls;icl2++) {
        int l2;
        for(l2=0;l2<=w->lmax;l2++) {
          int index_2=w->ncls*l2+icl2;
          int index=index_1*w->ncls*(w->lmax+1)+index_2;
          bpw_win_out[index]=gsl_matrix_get(bpw_win,index_1,index_2);
        }
      }
    }
  }
  gsl_matrix_free(bpw_win);
  gsl_matrix_free(inv_mcm);
  gsl_matrix_free(mat_coupled_bin);
}
/* Produces decoupled bandpowers from coupled pseudo-Cls:
 * 1) subtract noise bias and deprojection bias and bin into bandpowers,
 * 2) solve the binned coupling system via its LU decomposition,
 * 3) unpack the solution into cl_out[spectrum][band]. */
void nmt_decouple_cl_l(nmt_workspace *w,flouble **cl_in,flouble **cl_noise_in,
		       flouble **cl_bias,flouble **cl_out)
{
  int nvec=w->ncls*w->bin->n_bands;
  gsl_vector *vec_coupled=gsl_vector_alloc(nvec);
  gsl_vector *vec_decoupled=gsl_vector_alloc(nvec);

  /* Step 1: debias and bin */
  for(int icl=0; icl<w->ncls; icl++) {
    for(int ib=0; ib<w->bin->n_bands; ib++) {
      double bpw=0;
      for(int i=0; i<w->bin->nell_list[ib]; i++) {
        int ell=w->bin->ell_list[ib][i];
        double wgt=w->bin->f_ell[ib][i]*w->bin->w_list[ib][i];
        bpw+=(cl_in[icl][ell]-cl_noise_in[icl][ell]-cl_bias[icl][ell])*wgt;
      }
      gsl_vector_set(vec_coupled,w->ncls*ib+icl,bpw);
    }
  }

  /* Step 2: invert the binned coupling matrix */
  gsl_linalg_LU_solve(w->coupling_matrix_binned,w->coupling_matrix_perm,
                      vec_coupled,vec_decoupled);

  /* Step 3: unpack */
  for(int icl=0; icl<w->ncls; icl++) {
    for(int ib=0; ib<w->bin->n_bands; ib++)
      cl_out[icl][ib]=gsl_vector_get(vec_decoupled,w->ncls*ib+icl);
  }

  gsl_vector_free(vec_coupled);
  gsl_vector_free(vec_decoupled);
}
/* Computes the coupled (pseudo-) power spectrum of two fields directly
 * from their alms. Both fields must carry maps (not mask-only) and share
 * the same band limit. */
void nmt_compute_coupled_cell(nmt_field *fl1,nmt_field *fl2,flouble **cl_out)
{
  if(fl1->mask_only || fl2->mask_only)
    report_error(NMT_ERROR_LITE,"Can't correlate mapless fields!\n");
  if(fl1->lmax!=fl2->lmax)
    report_error(NMT_ERROR_CONSISTENT_RESO,"Can't correlate fields with different resolutions\n");

  int lmax=fl1->lmax;
  he_alm2cl(fl1->alms,fl2->alms,fl1->spin,fl2->spin,cl_out,lmax);
}
/* One-stop MASTER estimator: computes (or reuses) a workspace, measures
 * the coupled pseudo-Cl, evaluates the deprojection bias and returns the
 * decoupled bandpowers in cl_out. The returned workspace is newly created
 * when w0==NULL (caller owns it) or simply w0 otherwise. */
nmt_workspace *nmt_compute_power_spectra(nmt_field *fl1,nmt_field *fl2,
					 nmt_binning_scheme *bin,nmt_workspace *w0,
					 flouble **cl_noise,flouble **cl_proposal,flouble **cl_out,
					 int niter,int lmax_mask,int l_toeplitz,
					 int l_exact,int dl_band)
{
  nmt_workspace *w;
  if(w0!=NULL) {
    w=w0;
    if(w->lmax>fl1->lmax)
      report_error(NMT_ERROR_CONSISTENT_RESO,"Workspace does not match map resolution\n");
  }
  else
    w=nmt_compute_coupling_matrix(fl1,fl2,bin,0,niter,lmax_mask,l_toeplitz,l_exact,dl_band);

  /* Scratch spectra: measured coupled Cls and deprojection bias */
  int n_ell=fl1->lmax+1;
  flouble **cl_data=my_malloc(w->ncls*sizeof(flouble *));
  flouble **cl_bias=my_malloc(w->ncls*sizeof(flouble *));
  for(int icl=0; icl<w->ncls; icl++) {
    cl_data[icl]=my_calloc(n_ell,sizeof(flouble));
    cl_bias[icl]=my_calloc(n_ell,sizeof(flouble));
  }

  nmt_compute_coupled_cell(fl1,fl2,cl_data);
  nmt_compute_deprojection_bias(fl1,fl2,cl_proposal,cl_bias,niter);
  nmt_decouple_cl_l(w,cl_data,cl_noise,cl_bias,cl_out);

  for(int icl=0; icl<w->ncls; icl++) {
    free(cl_data[icl]);
    free(cl_bias[icl]);
  }
  free(cl_data);
  free(cl_bias);
  return w;
}
|
fac_zero_stencilcoef.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_sstruct_ls.h"
#include "fac.h"
/* AbsStencilShape: sets abs_shape to |i|+|j|+|k| for a stencil offset.
 * The result is zero only for the central (0,0,0) stencil entry, so it is
 * used below to skip the diagonal coefficient. */
#define AbsStencilShape(stencil, abs_shape) \
{ \
   HYPRE_Int ii,jj,kk; \
   ii = hypre_IndexX(stencil); \
   jj = hypre_IndexY(stencil); \
   kk = hypre_IndexZ(stencil); \
   abs_shape= hypre_abs(ii) + hypre_abs(jj) + hypre_abs(kk); \
}
/*--------------------------------------------------------------------------
 * hypre_FacZeroCFSten: Zeroes the coarse stencil coefficients that reach
 * into an underlying coarsened refinement box.
 * Algo: For each cbox
 *       {
 *          1) refine cbox and expand by one in each direction
 *          2) boxman_intersect with the fboxman
 *          3) loop over intersection boxes to see if stencil
 *             reaches over.
 *       }
 * NOTE(review): the Af argument is not referenced in this function body;
 * only Ac is modified.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FacZeroCFSten( hypre_SStructPMatrix *Af,
                     hypre_SStructPMatrix *Ac,
                     hypre_SStructGrid    *grid,
                     HYPRE_Int             fine_part,
                     hypre_Index           rfactors )
{
   hypre_BoxManager      *fboxman;
   hypre_BoxManEntry    **boxman_entries;
   HYPRE_Int              nboxman_entries;
   hypre_SStructPGrid    *p_cgrid;
   hypre_Box              fgrid_box;
   hypre_StructGrid      *cgrid;
   hypre_BoxArray        *cgrid_boxes;
   hypre_Box             *cgrid_box;
   hypre_Box              scaled_box;
   hypre_Box             *shift_ibox;
   hypre_StructMatrix    *smatrix;
   hypre_StructStencil   *stencils;
   HYPRE_Int              stencil_size;
   hypre_Index            refine_factors, upper_shift;
   hypre_Index            stride;
   hypre_Index            stencil_shape;
   hypre_Index            zero_index, ilower, iupper;
   HYPRE_Int              nvars, var1, var2;
   HYPRE_Int              ndim;
   hypre_Box             *ac_dbox;
   HYPRE_Real            *ac_ptr;
   hypre_Index            loop_size;
   HYPRE_Int              iac;
   HYPRE_Int              ci, i, j;
   HYPRE_Int              abs_shape;
   HYPRE_Int              ierr = 0;

   /* Coarse-grid geometry comes from Ac's part grid */
   p_cgrid = hypre_SStructPMatrixPGrid(Ac);
   nvars   = hypre_SStructPMatrixNVars(Ac);
   ndim    = hypre_SStructPGridNDim(p_cgrid);

   hypre_BoxInit(&fgrid_box, ndim);
   hypre_BoxInit(&scaled_box, ndim);

   /* stride = (1,1,1); upper_shift = rfactors-1 (used to map a coarse cell
    * to the upper corner of its refined patch) */
   hypre_ClearIndex(zero_index);
   hypre_ClearIndex(stride);
   hypre_ClearIndex(upper_shift);
   for (i= 0; i< ndim; i++)
   {
      stride[i]= 1;
      upper_shift[i]= rfactors[i]-1;
   }

   /* Pad refinement factors to 3D with unit factors */
   hypre_CopyIndex(rfactors, refine_factors);
   if (ndim < 3)
   {
      for (i= ndim; i< 3; i++)
      {
         refine_factors[i]= 1;
      }
   }

   for (var1= 0; var1< nvars; var1++)
   {
      cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(Ac), var1);
      cgrid_boxes= hypre_StructGridBoxes(cgrid);
      fboxman= hypre_SStructGridBoxManager(grid, fine_part, var1);

      /*------------------------------------------------------------------
       * For each parent coarse box find all fboxes that may be connected
       * through a stencil entry- refine this box, expand it by one
       * in each direction, and boxman_intersect with fboxman
       *------------------------------------------------------------------*/
      hypre_ForBoxI(ci, cgrid_boxes)
      {
         cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci);

         /* refine the coarse box into fine-index space ... */
         hypre_StructMapCoarseToFine(hypre_BoxIMin(cgrid_box), zero_index,
                                     refine_factors, hypre_BoxIMin(&scaled_box));
         hypre_StructMapCoarseToFine(hypre_BoxIMax(cgrid_box), upper_shift,
                                     refine_factors, hypre_BoxIMax(&scaled_box));

         /* ... and grow it by one cell in every direction */
         hypre_SubtractIndexes(hypre_BoxIMin(&scaled_box), stride, 3,
                               hypre_BoxIMin(&scaled_box));
         hypre_AddIndexes(hypre_BoxIMax(&scaled_box), stride, 3,
                          hypre_BoxIMax(&scaled_box));

         hypre_BoxManIntersect(fboxman, hypre_BoxIMin(&scaled_box),
                               hypre_BoxIMax(&scaled_box), &boxman_entries,
                               &nboxman_entries);

         for (var2= 0; var2< nvars; var2++)
         {
            stencils= hypre_SStructPMatrixSStencil(Ac, var1, var2);
            if (stencils != NULL)
            {
               stencil_size= hypre_StructStencilSize(stencils);
               smatrix = hypre_SStructPMatrixSMatrix(Ac, var1, var2);
               ac_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix),
                                           ci);

               /*---------------------------------------------------------
                * Find the stencil coefficients that must be zeroed off.
                * Loop over all possible boxes.
                *---------------------------------------------------------*/
               for (i= 0; i< stencil_size; i++)
               {
                  hypre_CopyIndex(hypre_StructStencilElement(stencils, i),
                                  stencil_shape);
                  AbsStencilShape(stencil_shape, abs_shape);
                  if (abs_shape) /* non-centre stencils are zeroed */
                  {
                     /* look for connecting fboxes that must be zeroed. */
                     for (j= 0; j< nboxman_entries; j++)
                     {
                        hypre_BoxManEntryGetExtents(boxman_entries[j], ilower, iupper);
                        hypre_BoxSetExtents(&fgrid_box, ilower, iupper);

                        /* coarse cells whose stencil entry lands in this fbox */
                        shift_ibox= hypre_CF_StenBox(&fgrid_box, cgrid_box, stencil_shape,
                                                     refine_factors, ndim);
                        if ( hypre_BoxVolume(shift_ibox) )
                        {
                           ac_ptr= hypre_StructMatrixExtractPointerByIndex(smatrix,
                                                                           ci,
                                                                           stencil_shape);
                           hypre_BoxGetSize(shift_ibox, loop_size);
                           hypre_BoxLoop1Begin(ndim, loop_size,
                                               ac_dbox, hypre_BoxIMin(shift_ibox),
                                               stride, iac);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iac) HYPRE_SMP_SCHEDULE
#endif
                           hypre_BoxLoop1For(iac)
                           {
                              ac_ptr[iac] = 0.0;
                           }
                           hypre_BoxLoop1End(iac);
                        } /* if ( hypre_BoxVolume(shift_ibox) ) */
                        hypre_BoxDestroy(shift_ibox);
                     } /* for (j= 0; j< nboxman_entries; j++) */
                  } /* if (abs_shape) */
               } /* for (i= 0; i< stencil_size; i++) */
            } /* if (stencils != NULL) */
         } /* for (var2= 0; var2< nvars; var2++) */
         hypre_TFree(boxman_entries);
      } /* hypre_ForBoxI ci */
   } /* for (var1= 0; var1< nvars; var1++) */
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_FacZeroFCSten: Zeroes the fine stencil coefficients that reach
* into a coarse box.
* Idea: zero off any stencil connection of a fine box that does not
* connect to a sibling box
* Algo: For each fbox
* {
* 1) expand by one in each direction so that sibling boxes can be
* reached
* 2) boxman_intersect with the fboxman to get all fboxes including
* itself and the siblings
* 3) loop over intersection boxes, shift them in the stencil
* direction (now we are off the fbox), and subtract any sibling
* extents. The remaining chunks (boxes of a box_array) are
* the desired but shifted extents.
* 4) shift these shifted extents in the negative stencil direction
* to get back into fbox. Zero-off the matrix over these latter
* extents.
* }
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FacZeroFCSten( hypre_SStructPMatrix *A,
hypre_SStructGrid *grid,
HYPRE_Int fine_part)
{
MPI_Comm comm= hypre_SStructGridComm(grid);
hypre_BoxManager *fboxman;
hypre_BoxManEntry **boxman_entries;
HYPRE_Int nboxman_entries;
hypre_SStructPGrid *p_fgrid;
hypre_StructGrid *fgrid;
hypre_BoxArray *fgrid_boxes;
hypre_Box *fgrid_box;
hypre_Box scaled_box;
hypre_BoxArray *intersect_boxes, *tmp_box_array1, *tmp_box_array2;
hypre_StructMatrix *smatrix;
hypre_StructStencil *stencils;
HYPRE_Int stencil_size;
hypre_Index stride, ilower, iupper;
hypre_Index stencil_shape, shift_index;
hypre_Box shift_ibox;
hypre_Box intersect_box;
hypre_Index size_ibox;
HYPRE_Int nvars, var1, var2;
HYPRE_Int ndim;
hypre_Box *a_dbox;
HYPRE_Real *a_ptr;
hypre_Index loop_size;
HYPRE_Int ia;
HYPRE_Int fi, fj, i, j;
HYPRE_Int abs_shape;
HYPRE_Int myid, proc;
HYPRE_Int ierr = 0;
/* Setup: rank, grid metadata, a unit stride, and a scratch box array of
   size 1 that hypre_SubtractBoxArrays uses as the subtrahend below. */
hypre_MPI_Comm_rank(comm, &myid);
p_fgrid = hypre_SStructPMatrixPGrid(A);
nvars = hypre_SStructPMatrixNVars(A);
ndim = hypre_SStructPGridNDim(p_fgrid);
hypre_BoxInit(&scaled_box, ndim);
hypre_BoxInit(&shift_ibox, ndim);
hypre_BoxInit(&intersect_box, ndim);
hypre_ClearIndex(stride);
for (i= 0; i< ndim; i++)
{
stride[i]= 1;
}
tmp_box_array1= hypre_BoxArrayCreate(1, ndim);
/* Loop over the row variables of the (block) matrix. */
for (var1= 0; var1< nvars; var1++)
{
fgrid = hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A), var1);
fgrid_boxes= hypre_StructGridBoxes(fgrid);
fboxman = hypre_SStructGridBoxManager(grid, fine_part, var1);
hypre_ForBoxI(fi, fgrid_boxes)
{
fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi);
/* size_ibox[d] = extent of fgrid_box in direction d minus one; used
   below to translate a face of the box in a stencil direction. */
hypre_ClearIndex(size_ibox);
for (i= 0; i< ndim; i++)
{
size_ibox[i] = hypre_BoxSizeD(fgrid_box, i) - 1;
}
/* expand fgrid_box & boxman_intersect with fboxman. */
hypre_SubtractIndexes(hypre_BoxIMin(fgrid_box), stride, 3,
hypre_BoxIMin(&scaled_box));
hypre_AddIndexes(hypre_BoxIMax(fgrid_box), stride, 3,
hypre_BoxIMax(&scaled_box));
hypre_BoxManIntersect(fboxman, hypre_BoxIMin(&scaled_box),
hypre_BoxIMax(&scaled_box), &boxman_entries,
&nboxman_entries);
/* Loop over the column variables coupled to var1. */
for (var2= 0; var2< nvars; var2++)
{
stencils= hypre_SStructPMatrixSStencil(A, var1, var2);
if (stencils != NULL)
{
stencil_size= hypre_StructStencilSize(stencils);
smatrix = hypre_SStructPMatrixSMatrix(A, var1, var2);
a_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix),
fi);
for (i= 0; i< stencil_size; i++)
{
hypre_CopyIndex(hypre_StructStencilElement(stencils, i),
stencil_shape);
AbsStencilShape(stencil_shape, abs_shape);
if (abs_shape) /* non-centre stencils are zeroed */
{
/* shift_ibox = the face layer of fgrid_box that the stencil
   entry reaches out of: translate the box by (extent-1) in
   each stencil direction, then clip back to fgrid_box. */
hypre_SetIndex3(shift_index,
size_ibox[0]*stencil_shape[0],
size_ibox[1]*stencil_shape[1],
size_ibox[2]*stencil_shape[2]);
hypre_AddIndexes(shift_index, hypre_BoxIMin(fgrid_box), 3,
hypre_BoxIMin(&shift_ibox));
hypre_AddIndexes(shift_index, hypre_BoxIMax(fgrid_box), 3,
hypre_BoxIMax(&shift_ibox));
hypre_IntersectBoxes(&shift_ibox, fgrid_box, &shift_ibox);
/* shift_index is reused below as the negative stencil offset
   to map the shifted extents back into fgrid_box. */
hypre_SetIndex3(shift_index, -stencil_shape[0], -stencil_shape[1],
-stencil_shape[2]);
/*-----------------------------------------------------------
* Check to see if the stencil does not couple to a sibling
* box. These boxes should be in boxman_entries. But do not
* subtract fgrid_box itself, which is also in boxman_entries.
*-----------------------------------------------------------*/
hypre_AddIndexes(stencil_shape, hypre_BoxIMin(&shift_ibox), 3,
hypre_BoxIMin(&shift_ibox));
hypre_AddIndexes(stencil_shape, hypre_BoxIMax(&shift_ibox), 3,
hypre_BoxIMax(&shift_ibox));
intersect_boxes= hypre_BoxArrayCreate(1, ndim);
hypre_CopyBox(&shift_ibox, hypre_BoxArrayBox(intersect_boxes,0));
/* Subtract every sibling box (same process and different box
   number, or any remote box) from the shifted extents. */
for (j= 0; j< nboxman_entries; j++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[j], &proc);
hypre_SStructBoxManEntryGetBoxnum(boxman_entries[j], &fj);
if ((proc != myid) || (fj != fi))
{
hypre_BoxManEntryGetExtents(boxman_entries[j], ilower, iupper);
hypre_BoxSetExtents(&scaled_box, ilower, iupper);
hypre_IntersectBoxes(&shift_ibox, &scaled_box, &intersect_box);
if ( hypre_BoxVolume(&intersect_box) )
{
hypre_CopyBox(&intersect_box,
hypre_BoxArrayBox(tmp_box_array1, 0));
tmp_box_array2= hypre_BoxArrayCreate(0, ndim);
hypre_SubtractBoxArrays(intersect_boxes,
tmp_box_array1,
tmp_box_array2);
hypre_BoxArrayDestroy(tmp_box_array2);
}
}
} /* for (j= 0; j< nboxman_entries; j++) */
/*-----------------------------------------------------------
* intersect_boxes now has the shifted extents for the
* coefficients to be zeroed.
*-----------------------------------------------------------*/
a_ptr= hypre_StructMatrixExtractPointerByIndex(smatrix,
fi,
stencil_shape);
hypre_ForBoxI(fj, intersect_boxes)
{
hypre_CopyBox(hypre_BoxArrayBox(intersect_boxes, fj), &intersect_box);
/* Shift back into fgrid_box (negative stencil direction). */
hypre_AddIndexes(shift_index, hypre_BoxIMin(&intersect_box), 3,
hypre_BoxIMin(&intersect_box));
hypre_AddIndexes(shift_index, hypre_BoxIMax(&intersect_box), 3,
hypre_BoxIMax(&intersect_box));
hypre_BoxGetSize(&intersect_box, loop_size);
hypre_BoxLoop1Begin(ndim, loop_size,
a_dbox, hypre_BoxIMin(&intersect_box),
stride, ia);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,ia) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(ia)
{
a_ptr[ia] = 0.0;
}
hypre_BoxLoop1End(ia);
} /* hypre_ForBoxI(fj, intersect_boxes) */
hypre_BoxArrayDestroy(intersect_boxes);
} /* if (abs_shape) */
} /* for (i= 0; i< stencil_size; i++) */
} /* if (stencils != NULL) */
} /* for (var2= 0; var2< nvars; var2++) */
hypre_TFree(boxman_entries);
} /* hypre_ForBoxI(fi, fgrid_boxes) */
} /* for (var1= 0; var1< nvars; var1++) */
hypre_BoxArrayDestroy(tmp_box_array1);
return ierr;
}
|
reorder_ref.h | /* Copyright (c) 2018 NoobsHPC Authors, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef NBHPC_ICESWORD_OPERATOR_REORDER_H
#define NBHPC_ICESWORD_OPERATOR_REORDER_H
#pragma once
#include <vector>
#include "icesword/types.h"
namespace noobshpc{
namespace icesword{
// reorder_hw2wh<OP_dtypeDType>(weight, weight_reorder, N, dim_k);
// reorder_hw2wh<OP_dtypeDType>(weight, weight_reorder, N, dim_k);
template<typename dtype>
static inline Status reorder_hw2wh(const void* in, void* out, const size_t w, const size_t h) {
    // Transposes an h-by-w row-major matrix of dtype elements from `in` into
    // `out` (which then holds the w-by-h transpose). Buffers must not alias.
    CHECK_EQ(h * w != 0, true) << "" << "wrong h,w value !";
    auto src = (const dtype *)in;
    auto dst = (dtype *)out;
    // size_t loop indices: the previous `auto i = 0` deduced int, causing a
    // signed/unsigned comparison against the size_t bounds and potential
    // overflow for very large matrices. Unsigned loop variables require
    // OpenMP 3.0+, which also makes the old collapse(1) clause unnecessary.
    #pragma omp parallel for
    for (size_t i = 0; i < h; i++) {
        #pragma omp simd
        for (size_t j = 0; j < w; j++) {
            dst[j * h + i] = src[i * w + j];
        }
    }
    return S_Success;
}
// reorder_hw2wh<dtype>((const void**)&weight, &weight_reorder, N, dim_k);
// reorder_hw2wh<dtype>((const void**)&weight, &weight_reorder, N, dim_k);
template<typename dtype>
static inline Status reorder_hw2wh(const void** in, void** out, const size_t h, const size_t w) {
    // As the pointer overload, but takes the buffers indirectly: transposes
    // the h-by-w row-major matrix at *in into *out. Buffers must not alias.
    CHECK_EQ(h * w != 0, true) << "" << "wrong h,w value !";
    auto src = (const dtype **)in;
    auto dst = (dtype **)out;
    // size_t loop indices: the previous `auto i = 0` deduced int, causing a
    // signed/unsigned comparison against the size_t bounds and potential
    // overflow for very large matrices. Unsigned loop variables require
    // OpenMP 3.0+, which also makes the old collapse(1) clause unnecessary.
    #pragma omp parallel for
    for (size_t i = 0; i < h; i++) {
        #pragma omp simd
        for (size_t j = 0; j < w; j++) {
            (*dst)[j * h + i] = (*src)[i * w + j];
        }
    }
    return S_Success;
}
} // namespace icesword
} // namespace noobshpc
#endif // NBHPC_ICESWORD_OPERATOR_REORDER_H
|
stitching.c | //
// Created by Benedict Paten on 3/14/20.
//
// Code for stitching together "chunks" of inferred sequence
//
#include "margin.h"
#include "htsIntegration.h"
int chunkToStitch_cmp(ChunkToStitch *chunk1, ChunkToStitch *chunk2) {
    /*
     * Orders two chunks by their ordinal in the output order: negative when
     * chunk1 comes first, positive when chunk2 comes first, zero when equal.
     */
    int64_t a = chunk1->chunkOrdinal;
    int64_t b = chunk2->chunkOrdinal;
    if (a < b) {
        return -1;
    }
    if (a > b) {
        return 1;
    }
    return 0;
}
ChunkToStitch *chunkToStitch_construct(char *seqName, int64_t chunkOrdinal, bool phased,
                                       bool initRepeatCounts, bool initPoa) {
    /*
     * Allocates a ChunkToStitch, taking ownership of seqName.
     * Optional line lists (reads / repeat counts / POA) are only created when
     * requested; calloc leaves every other field zeroed/NULL.
     */
    ChunkToStitch *chunk = calloc(1, sizeof(ChunkToStitch));
    chunk->seqName = seqName;
    chunk->chunkOrdinal = chunkOrdinal;
    chunk->wasSwitched = FALSE;
    if (phased) {
        chunk->readsHap1Lines = stList_construct3(0, free);
        chunk->readsHap2Lines = stList_construct3(0, free);
    }
    if (initRepeatCounts) {
        chunk->repeatCountLinesHap1 = stList_construct3(0, free);
        if (phased) {
            chunk->repeatCountLinesHap2 = stList_construct3(0, free);
        }
    }
    if (initPoa) {
        chunk->poaHap1StringsLines = stList_construct3(0, free);
        if (phased) {
            chunk->poaHap2StringsLines = stList_construct3(0, free);
        }
    }
    return chunk;
}
void chunkToStitch_destruct(ChunkToStitch *chunkToStitch) {
    /*
     * Frees a ChunkToStitch and all fields it owns.
     * free(NULL) is a no-op, so the string fields need no NULL guards
     * (the previous `if (p != NULL) free(p)` guards were redundant);
     * stList_destruct is still only called on non-NULL lists.
     */
    free(chunkToStitch->seqName);
    free(chunkToStitch->seqHap1);
    // Second sequence and remaining fields are optional
    free(chunkToStitch->seqHap2);
    if (chunkToStitch->poaHap1StringsLines != NULL) {
        stList_destruct(chunkToStitch->poaHap1StringsLines);
    }
    if (chunkToStitch->poaHap2StringsLines != NULL) {
        stList_destruct(chunkToStitch->poaHap2StringsLines);
    }
    if (chunkToStitch->repeatCountLinesHap1 != NULL) {
        stList_destruct(chunkToStitch->repeatCountLinesHap1);
    }
    if (chunkToStitch->repeatCountLinesHap2 != NULL) {
        stList_destruct(chunkToStitch->repeatCountLinesHap2);
    }
    if (chunkToStitch->readsHap1Lines != NULL) {
        stList_destruct(chunkToStitch->readsHap1Lines);
    }
    if (chunkToStitch->readsHap2Lines != NULL) {
        stList_destruct(chunkToStitch->readsHap2Lines);
    }
    free(chunkToStitch);
}
stList *readChunk(FILE *fh, char **seqName, int64_t *chunkOrdinal) {
/*
* Reads "chunks" from a file. Each chunk starts with a line formatted as:
* SEQ_NAME,CHUNK_ORDINAL,LINE_NUMBER\n
* where SEQ_NAME is the name of the string, containing any characters other than white space,
* CHUNK_ORDINAL is the order of the chunk in the output,
* and LINE_NUMBER is an integer >= 0 that gives the remaining number of lines in the chunk to read.
* Returns the chunk as a list of lines, in order, and initializes the seqName argument to be a string representing
* seqName.
*
* If the EOF is reached will return NULL, set seqName to NULL and chunkOrdinal to -1.
*
* Newlines characters are omitted from the ends of each line string.
*
* Ownership: the returned list frees its line strings on destruct, and the
* caller is responsible for freeing *seqName.
*/
char *headerLine = stFile_getLineFromFile(fh);
if (headerLine == NULL) {
*chunkOrdinal = -1;
*seqName = NULL; // Set seqName to NULL (EOF reached)
return NULL;
}
stList *tokens = stString_splitByString(headerLine, ",");
if (stList_length(tokens) != 3) {
st_errAbort("Expected three tokens in header line, got %" PRIi64 "\n", stList_length(tokens));
}
*seqName = stList_removeFirst(tokens); // Set seqName
// NOTE(review): strtol errors are not checked; a malformed ordinal or line
// count silently parses as 0 — confirm header lines are always well-formed.
*chunkOrdinal = strtol(stList_get(tokens, 0), NULL, 10); // Get chunk ordinal
int64_t lineNo = strtol(stList_peek(tokens), NULL, 10); // Get line number
stList *lines = stList_construct3(0, free);
for (int64_t i = 0; i < lineNo; i++) {
char *bodyLine = stFile_getLineFromFile(fh);
if (bodyLine == NULL) {
st_errAbort("Failed to read body line from chunk, line %" PRIi64 " of %" PRIi64 " lines\n", i, lineNo);
}
stList_append(lines, bodyLine);
}
// Cleanup
stList_destruct(tokens);
free(headerLine);
return lines;
}
stList *readChunk2(FILE *fh, char *expectedSequenceName, int64_t expectedChunkOrdinal) {
    /*
     * As readChunk, but aborts if no chunk header matches: the seqName read
     * must equal expectedSequenceName and the ordinal must equal
     * expectedChunkOrdinal. Returns NULL only at end of file.
     */
    char *seqName = NULL;
    int64_t chunkOrdinal = -1;
    stList *lines = readChunk(fh, &seqName, &chunkOrdinal);
    if (lines == NULL) {
        return NULL;
    }
    if (chunkOrdinal != expectedChunkOrdinal) {
        st_errAbort("Got an unexpected chunk ordinal (%" PRIi64 ") in reading chunk (expected: %" PRIi64 ")\n",
                    chunkOrdinal, expectedChunkOrdinal);
    }
    if (!stString_eq(seqName, expectedSequenceName)) {
        st_errAbort("Got an unexpected sequence name: %s in reading chunk (expected: %s)\n", seqName,
                    expectedSequenceName);
    }
    free(seqName);
    return lines;
}
bool chunkToStitch_readSequenceChunk(FILE *fh, ChunkToStitch *chunk, bool phased) {
/*
* Reads a "chunk" from a sequence containing file, adding it to chunk. Returns non-zero if no more chunks remain.
*
* Sets chunk->seqName, chunk->chunkOrdinal and chunk->seqHap1; when phased,
* also reads a second chunk with the same name/ordinal into chunk->seqHap2.
* Returns 0 at end of file, 1 otherwise.
*/
// Read the next set of lines from the file
stList *lines = readChunk(fh, &chunk->seqName, &chunk->chunkOrdinal);
// If we get nothing we have exhausted the file
if (lines == NULL) {
return 0;
}
chunk->seqHap1 = stString_join2("", lines); // Concatenate the lines to make the sequence
stList_destruct(lines); // Cleanup
if (phased) {
int64_t i;
char *name;
// The hap2 chunk must follow immediately and carry the same name/ordinal
lines = readChunk(fh, &name, &i);
if (lines == NULL) {
st_errAbort("Error trying get alt sequence from chunk");
}
if (i != chunk->chunkOrdinal) {
st_errAbort(
"Got an unexpected chunk ordinal (%" PRIi64 ") in reading second haplotype chunk (expected: %" PRIi64 ")\n",
i, chunk->chunkOrdinal);
}
if (!stString_eq(chunk->seqName, name)) {
st_errAbort("Got an unexpected hap2 sequence name: %s in reading chunk (expected: %s)\n", name,
chunk->seqName);
}
chunk->seqHap2 = stString_join2("", lines);
stList_destruct(lines); // Cleanup
free(name);
}
return 1;
}
void chunkToStitch_readPoaChunk(FILE *fh, ChunkToStitch *chunk, bool phased) {
    /*
     * Reads the POA lines for this chunk (hap1, plus hap2 when phased),
     * checking each against the chunk's sequence name and ordinal.
     */
    chunk->poaHap1StringsLines = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
    if (!phased) {
        return;
    }
    chunk->poaHap2StringsLines = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
}
bool chunkToStitch_readReadPhasingChunk(FILE *fh, ChunkToStitch *chunk) {
/*
* Reads a read phasing chunk. Returns true if chunk was found, returns FALSE if end of chunk
*
* Two consecutive chunks (hap1 then hap2) are read; they must share the same
* sequence name and ordinal, and either both or neither must be present.
*/
assert(chunk->readsHap1Lines == NULL);
assert(chunk->readsHap2Lines == NULL);
if (chunk->seqName == NULL) { // case where we are skipping fasta output for improved speed
// Name/ordinal are not yet known, so read them from the hap1 chunk and
// validate the hap2 chunk against them manually.
chunk->readsHap1Lines = readChunk(fh, &chunk->seqName, &chunk->chunkOrdinal);
int64_t i;
char *name;
chunk->readsHap2Lines = readChunk(fh, &name, &i);
if (chunk->readsHap2Lines != NULL) {
if (i != chunk->chunkOrdinal) {
st_errAbort("Got an unexpected chunk ordinal (%"PRIi64") in reading sequence lines (expected: %"PRIi64")\n",
i, chunk->chunkOrdinal);
}
if (!stString_eq(chunk->seqName, name)) {
st_errAbort("Got an unexpected hap2 sequence name: %s in reading sequence lines (expected: %s)\n", name,
chunk->seqName);
}
free(name);
}
} else { // standard case, chunk is initialized from sequence file
// readChunk2 aborts on any name/ordinal mismatch, so NULL here means EOF
chunk->readsHap1Lines = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
chunk->readsHap2Lines = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
}
// Exactly one of the two lists being NULL means a truncated/corrupt file
// (== yields 0/1, so ^ is a logical XOR here)
if (chunk->readsHap1Lines == NULL ^ chunk->readsHap2Lines == NULL) {
st_errAbort("Got reads for one chunk but not another! Expected chunk %"PRId64"\n", chunk->chunkOrdinal);
}
return chunk->readsHap1Lines != NULL;
}
void chunkToStitch_readRepeatCountChunk(FILE *fh, ChunkToStitch *chunk, bool phased) {
    /*
     * Reads the repeat-count lines for this chunk (hap1, plus hap2 when
     * phased), checking each against the chunk's sequence name and ordinal.
     */
    chunk->repeatCountLinesHap1 = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
    if (!phased) {
        return;
    }
    chunk->repeatCountLinesHap2 = readChunk2(fh, chunk->seqName, chunk->chunkOrdinal);
}
static void swap(void **a, void **b) {
    // Exchange the pointers stored at a and b.
    void *tmp = *b;
    *b = *a;
    *a = tmp;
}
static void addToHapReadsSeen(stHash *hapReads, stHash *otherHapReads, stHash *readsToAdd) {
/*
* Adds read names / probs from readsToAdd to hapReads that are not in otherHapReads. Cleans up readsToAdd.
*
* Ownership: each prob pointer in readsToAdd is either moved into hapReads
* or freed here; keys are copied when inserted. Value destruction is
* disabled on readsToAdd before destructing it because the values have
* already been transferred or freed.
*/
stHashIterator *it = stHash_getIterator(readsToAdd);
char *readName;
while ((readName = stHash_getNext(it)) != NULL) {
double *prob = stHash_search(readsToAdd, readName);
/*
* Check if in reads for other haplotype.
* If it is, then if the prob of being in this haplotype is greater then
* remove it from the other haplotype so it can be added to this haplotype,
* otherwise do not add it to this haplotype.
*/
double *pProb;
if ((pProb = stHash_search(otherHapReads, readName)) != NULL) {
if (*prob > *pProb) {
free(stHash_removeAndFreeKey(otherHapReads, readName)); // Remove from otherHapReads
} else {
free(prob);
continue;
}
}
/*
* Now add the read to this haplotype
*/
if ((pProb = stHash_search(hapReads, readName)) == NULL) {
stHash_insert(hapReads, stString_copy(readName), prob);
} else if (*prob > *pProb) {
free(stHash_removeAndFreeKey(hapReads, readName)); // Cleanup the prior entries
stHash_insert(hapReads, stString_copy(readName), prob);
} else {
free(prob);
}
}
stHash_destructIterator(it);
stHash_setDestructValues(readsToAdd, NULL);
stHash_destruct(readsToAdd);
}
stHash *getReadNames(stList *readPartitionLines) {
    /*
     * Parse the names of the reads from the lines of output representing the relative read phasing and return as a
     * hash mapping read name (char *) to the log probability (double *) of the read being in the partition.
     * The first line is a header and is skipped; both keys and values are owned (and freed) by the returned hash.
     */
    stHash *readNames = stHash_construct3(stHash_stringKey, stHash_stringEqualKey, free, free);
    for (int64_t i = 1; i < stList_length(readPartitionLines); i++) {
        char *line = stList_get(readPartitionLines, i);
        stList *tokens = stString_splitByString(line, ",");
        assert(stHash_search(readNames, stList_get(tokens, 0)) ==
               NULL); // Sanity check that read name is not present twice
        double *prob = st_calloc(1, sizeof(double));
        // strtod, not strtof: the value is stored and compared as a double,
        // and strtof would silently truncate it to float precision
        *prob = strtod(stList_peek(tokens), NULL); // Get the log prob of the read being in the partition
        stHash_insert(readNames, stList_removeFirst(tokens), prob); // First field is the read name
        stList_destruct(tokens);
    }
    return readNames;
}
int64_t sizeOfIntersection(stHash *pSet, stHash *nSet) {
    /*
     * Counts how many keys of nSet are also present in pSet.
     */
    int64_t count = 0;
    stHashIterator *it = stHash_getIterator(nSet);
    char *key;
    while ((key = stHash_getNext(it)) != NULL) {
        if (stHash_search(pSet, key) != NULL) {
            count++;
        }
    }
    stHash_destructIterator(it);
    return count;
}
int64_t sizeOfIntersectionWithNonNegativeValues(stHash *pSet, stHash *nSet) {
/*
* Returns the number of keys in nSet also in pSet, counting only keys whose
* associated value (a double *, e.g. a read likelihood) is non-negative in
* both hashes. (The previous comment was copy-pasted from sizeOfIntersection
* and did not mention the non-negative filtering.)
*/
stHashIterator *it = stHash_getIterator(nSet);
int64_t i = 0;
char *readName;
while ((readName = stHash_getNext(it)) != NULL) {
double nLikelihood = *((double*) stHash_search(nSet, readName));
if (nLikelihood < 0)
continue;
if ((stHash_search(pSet, readName)) != NULL) {
double pLikelihood = *((double*) stHash_search(pSet, readName));
if (pLikelihood < 0)
continue;
i++;
}
}
stHash_destructIterator(it);
return i;
}
void chunkToStitch_phaseAdjacentChunks(ChunkToStitch *chunk, stHash *readsInHap1, stHash *readsInHap2, Params *params ) {
/*
* Phases chunk so that hap1 in chunk corresponds to hap1 in the prior chunks (as best as we can tell).
*/
// Get the names of the reads in the different read sets
stHash *chunkHap1Reads = getReadNames(chunk->readsHap1Lines);
stHash *chunkHap2Reads = getReadNames(chunk->readsHap2Lines);
// Calculate the intersection between reads shared between the chunks
int64_t cisH1 = params->phaseParams->stitchWithPrimaryReadsOnly ?
sizeOfIntersectionWithNonNegativeValues(readsInHap1, chunkHap1Reads) : sizeOfIntersection(readsInHap1, chunkHap1Reads);
int64_t cisH2 = params->phaseParams->stitchWithPrimaryReadsOnly ?
sizeOfIntersectionWithNonNegativeValues(readsInHap2, chunkHap2Reads) : sizeOfIntersection(readsInHap2, chunkHap2Reads);
int64_t transH1 = params->phaseParams->stitchWithPrimaryReadsOnly ?
sizeOfIntersectionWithNonNegativeValues(readsInHap2, chunkHap1Reads) : sizeOfIntersection(readsInHap2, chunkHap1Reads);
int64_t transH2 = params->phaseParams->stitchWithPrimaryReadsOnly ?
sizeOfIntersectionWithNonNegativeValues(readsInHap1, chunkHap2Reads) : sizeOfIntersection(readsInHap1, chunkHap2Reads);
// Calculate support for the cis (keeping the current relative phasing) and the trans (switching the phasing) configurations
int64_t cisPhase = cisH1 + cisH2; // Number of reads consistently phased in cis configuration
int64_t transPhase = transH2 + transH1; // Number of reads consistently phased in trans configuration
int64_t total = cisPhase + transPhase;
// Log the support for the phasing
// NOTE(review): when total (or any denominator below) is 0 the fractions
// divide by zero, printing inf/nan — log-only impact, but confirm intended.
// NOTE(review): the h2 denominator below uses (cisH2 + transH2); by symmetry
// with the h1 term, (cisH2 + transH1) may have been intended — verify.
char *logIdentifier = getLogIdentifier();
st_logInfo(" %s In stitching chunk %"PRId64" got %"
PRIi64 " hap1 and %" PRIi64 " hap2 reads\n",
logIdentifier, chunk->chunkOrdinal, stHash_size(chunkHap1Reads), stHash_size(chunkHap2Reads));
st_logInfo(
" %s Support for phasing cis-configuration, Total: %" PRIi64 " (%f), %" PRIi64 " (%f) in h1 intersection, %" PRIi64 " (%f) in h2 intersection\n",
logIdentifier, cisPhase, 1.0 * cisPhase / total, cisH1, 1.0 * cisH1 / (cisH1 + transH2), cisH2, 1.0 * cisH2 / (cisH2 + transH2));
st_logInfo(
" %s Support for phasing trans-configuration, Total: %" PRIi64 " (%f), %" PRIi64 " (%f) in h1 intersection, %" PRIi64 " (%f) in h2 intersection\n",
logIdentifier, transPhase, 1.0 * transPhase / total, transH1, 1.0 * transH1 / (transH1 + cisH1), transH2, 1.0 * transH2 / (transH2 + cisH2));
// Switch the relative phasing if the trans phase configuration has more support
if (cisPhase < transPhase) {
st_logInfo(" %s Flipping phase of chunk\n", logIdentifier);
swap((void *) &chunk->seqHap1, (void *) &chunk->seqHap2);
swap((void *) &chunk->poaHap1StringsLines, (void *) &chunk->poaHap2StringsLines);
swap((void *) &chunk->readsHap1Lines, (void *) &chunk->readsHap2Lines);
swap((void *) &chunk->repeatCountLinesHap1, (void *) &chunk->repeatCountLinesHap2);
swap((void *) &chunkHap1Reads, (void *) &chunkHap2Reads);
chunk->wasSwitched = TRUE;
}
//Remove duplicated reads from output (this also destructs chunkHap1Reads/chunkHap2Reads)
addToHapReadsSeen(readsInHap1, readsInHap2, chunkHap1Reads);
addToHapReadsSeen(readsInHap2, readsInHap1, chunkHap2Reads);
// Cleanup
free(logIdentifier);
}
// Minimum number of anchor pairs required before an overlap alignment is attempted.
static int64_t MIN_OVERLAP_ANCHOR_PAIRS = 2;

// Overrides the minimum anchor-pair threshold used when removing chunk overlaps.
void setMinOverlapAnchorPairs(int64_t minOverlapAnchorPairs) {
    MIN_OVERLAP_ANCHOR_PAIRS = minOverlapAnchorPairs;
}
char *getLargeNucleotideSequenceSummary(char *sequence) {
    /*
     * Returns a newly allocated summary of a sequence: strings longer than 17
     * characters are abbreviated to "XXXXXXXX...YYYYYYYY" (first and last 8
     * characters); shorter strings are copied whole. Temporarily writes a
     * '\0' into the input to print the prefix and restores it before
     * returning, so the input is unchanged on exit. Caller frees the result.
     */
    char *tmpSeq;
    int64_t sequenceLen = strlen(sequence); // hoisted: previously computed twice
    if (sequenceLen > 17) {
        char ch = sequence[8];
        sequence[8] = '\0';
        tmpSeq = stString_print("%s...%s", sequence, &(sequence[sequenceLen - 8]));
        sequence[8] = ch;
    } else {
        tmpSeq = stString_copy(sequence);
    }
    return tmpSeq;
}
int64_t removeOverlap(char *prefixString, int64_t prefixStringLength, char *suffixString, int64_t suffixStringLength,
int64_t approxOverlap, PolishParams *polishParams,
int64_t *prefixStringCropEnd, int64_t *suffixStringCropStart) {
/*
* Aligns the overlapping suffix of prefixString against the prefix of
* suffixString and picks the highest-weight aligned pair as the stitch
* point. Sets *prefixStringCropEnd (exclusive) and *suffixStringCropStart
* (inclusive) and returns the weight of the chosen pair. Returns -1 with
* "no trim" crop points (full prefix, suffix start 0) when both overlap
* regions are flanked by Ns or when no aligned pairs are found.
*/
// setup
char *logIdentifier = getLogIdentifier();
// Align the overlapping suffix of the prefixString and the prefix of the suffix string
// Get coordinates of substrings to be aligned
// i = start of the prefix's overlap region; j = (exclusive) end of the suffix's overlap region
int64_t i = (prefixStringLength - approxOverlap) < 0 ? 0 : prefixStringLength - approxOverlap;
int64_t j = approxOverlap < suffixStringLength ? approxOverlap : suffixStringLength;
// calcluate if both sequences are bookended by Ns
bool pSeqNs = prefixString[i] == 'N' && prefixString[strlen(prefixString) - 1] == 'N';
bool sSeqNs = suffixString[0] == 'N' && suffixString[j-1] == 'N';
if (pSeqNs && sSeqNs) {
st_logInfo(" %s Both prefix and suffix overlap sequences are flanked by Ns, not attempting to align\n",
logIdentifier);
free(logIdentifier);
*prefixStringCropEnd = prefixStringLength;
*suffixStringCropStart = 0;
return -1;
}
// Crop suffix (temporarily NUL-terminate at j; restored after alignment)
char c = suffixString[j];
suffixString[j] = '\0';
// Symbol strings
SymbolString sX = symbolString_construct(&(prefixString[i]), 0, strlen(&(prefixString[i])), polishParams->alphabet);
SymbolString sY = symbolString_construct(suffixString, 0, strlen(suffixString), polishParams->alphabet);
// Use default state machine for alignment
StateMachine *sM = stateMachine3_constructNucleotide(threeState);
// Get quick and dirty anchor pairs
stList *anchorPairs = getKmerAlignmentAnchors(sX, sY, (uint64_t) polishParams->p->diagonalExpansion);
stList *alignedPairs = NULL;
// failure case for anchoring, 0 or 1 anchors
if (stList_length(anchorPairs) < MIN_OVERLAP_ANCHOR_PAIRS) {
st_logInfo(" %s Anchoring for overlap alignment (lengths p:%"PRId64", s:%"PRId64") failed for having %"PRId64" "
"(< %"PRId64") entries\n", logIdentifier, sX.length, sY.length, stList_length(anchorPairs),
MIN_OVERLAP_ANCHOR_PAIRS);
// Do not attempt alignment
alignedPairs = stList_construct();
// TODO here we could save an align point in the middle of the overlap?
// would need to be careful about .5 overlap boundary being unaligned given run length changes
} else {
// Anchoring worked: run the alignment
alignedPairs = getAlignedPairsUsingAnchors(sM, sX, sY, anchorPairs, polishParams->p, 1, 1);
st_logInfo(" %s Got %"PRId64" anchor pairs and %"PRId64" aligned pairs while removing overlap for sequences of "
"length p:%"PRId64", s:%"PRId64"\n",
logIdentifier, stList_length(anchorPairs), stList_length(alignedPairs), sX.length, sY.length);
}
// Cleanup
symbolString_destruct(sX);
symbolString_destruct(sY);
stateMachine_destruct(sM);
stList_destruct(anchorPairs);
// Remove the suffix crop (restore the character overwritten above)
suffixString[j] = c;
// Pick the median point
// (the pair with the highest alignment weight, skipping out-of-bounds pairs)
stIntTuple *maxPair = NULL;
int64_t maxPairIdx = -1;
int64_t badAlignedPairCount = 0;
for (int64_t k = 0; k < stList_length(alignedPairs); k++) {
stIntTuple *aPair = stList_get(alignedPairs, k);
int64_t p = stIntTuple_get(aPair, 1);
int64_t s = stIntTuple_get(aPair, 2);
if (p < 0 || s < 0 || p >= prefixStringLength - i || s >= j) {
if (badAlignedPairCount == 0) {
st_logInfo(" %s CRITICAL proposed aligned pair (p%"PRId64", s%"PRId64") outside bounds p[0,%"PRId64"), s[0,%"PRId64")\n",
logIdentifier, p, s, prefixStringLength - i, j);
}
badAlignedPairCount++;
} else if (maxPair == NULL || stIntTuple_get(aPair, 0) > stIntTuple_get(maxPair, 0)) {
maxPairIdx = k;
maxPair = aPair;
}
}
if (badAlignedPairCount > 0) {
st_logCritical(" %s getAlignedPairsUsingAnchors proposed %"PRId64" (of %"PRId64") pairs outside of bounds p[0,%"PRId64"), s[0,%"PRId64")\n",
logIdentifier, badAlignedPairCount, stList_length(alignedPairs), prefixStringLength - i, j);
}
if (maxPair == NULL) {
// failed to find median point, loggit
if (st_getLogLevel() >= info) {
char *pSeqSummary = getLargeNucleotideSequenceSummary(&(prefixString[i]));
char *sSeqSummary = getLargeNucleotideSequenceSummary(suffixString);
st_logInfo(" %s Failed to find any aligned pairs between overlapping strings (prefix:%s, suffix:%s), not "
"doing any trimming (approx overlap: %i, total lengths: prefix %i, suffix %i)\n",
logIdentifier, pSeqSummary, sSeqSummary, approxOverlap, prefixStringLength, suffixStringLength);
free(pSeqSummary);
free(sSeqSummary);
}
*prefixStringCropEnd = prefixStringLength;
*suffixStringCropStart = 0;
} else {
st_logInfo(" %s Selecting best aligned pair at index %"PRId64" with pos p:%"PRId64"+%"PRId64", s:%"PRId64" with weight %"PRId64"\n",
logIdentifier, maxPairIdx, stIntTuple_get(maxPair, 1), i, stIntTuple_get(maxPair, 2),
stIntTuple_get(maxPair, 0));
*prefixStringCropEnd = stIntTuple_get(maxPair, 1) + i; // Exclusive
*suffixStringCropStart = stIntTuple_get(maxPair, 2); // Inclusive
}
int64_t overlapWeight = maxPair == NULL ? -1 : stIntTuple_get(maxPair, 0);
stList_destruct(alignedPairs);
free(logIdentifier);
return overlapWeight;
}
void renumberCSVLines(stList *csvLines, int64_t index) {
    /*
     * Rewrites the leading (index) field of every CSV line so the lines are
     * numbered sequentially starting from index. Replaces each line string
     * in place within the list (the old strings are freed).
     */
    int64_t lineCount = stList_length(csvLines);
    for (int64_t lineIdx = 0; lineIdx < lineCount; lineIdx++) { // Start from 0 as header line is already gone
        char *oldLine = stList_get(csvLines, lineIdx);
        stList *fields = stString_splitByString(oldLine, ",");
        // Swap the old index field for the next sequential value
        free(stList_get(fields, 0));
        stList_set(fields, 0, stString_print("%" PRIi64 "", index++));
        stList_set(csvLines, lineIdx, stString_join2(",", fields));
        stList_destruct(fields);
        free(oldLine);
    }
}
int64_t chunkToStitch_trimAdjacentChunks2(char **pSeq, char **seq,
                                          stList *pPoa, stList *poa, stList *pRepeatCounts, stList *repeatCounts,
                                          Params *params, int64_t *lengthOfSequenceOutputSoFar) {
    /*
     * Trims the overlap between the previous chunk's sequence (*pSeq) and the
     * current chunk's sequence (*seq), working in run-length-encoded space
     * when configured. Both sequence strings are replaced in place with their
     * cropped versions; the corresponding POA and repeat-count CSV lines (if
     * given) are trimmed and renumbered to match. Updates
     * *lengthOfSequenceOutputSoFar by the retained RLE length of *pSeq.
     * Returns the overlap match weight from removeOverlap (-1 if no
     * alignment was made).
     */
    // for logging
    char *logIdentifier = getLogIdentifier();
    // for very fast case where we don't write sequences, sanity check error case (this should never happen)
    if (*pSeq == NULL && *seq == NULL) {
        st_errAbort(" %s Encountered null sequences when stitching adjacent chunks!", logIdentifier);
    }
    // Convert to RLE space
    RleString *pSeqRle = params->polishParams->useRunLengthEncoding ?
                         rleString_construct(*pSeq) : rleString_construct_no_rle(*pSeq);
    RleString *seqRle = params->polishParams->useRunLengthEncoding ?
                        rleString_construct(*seq) : rleString_construct_no_rle(*seq);
    // Get the trim factor
    int64_t pSeqCropEnd = -1, seqCropStart = -1;
    int64_t overlapMatchWeight = removeOverlap(pSeqRle->rleString, pSeqRle->length, seqRle->rleString, seqRle->length,
                                               params->polishParams->chunkBoundary * 2,
                                               params->polishParams, &pSeqCropEnd, &seqCropStart);
    // Loggit
    st_logInfo(
            " %s Removing overlap between neighbouring chunks (in RLE space). Approx overlap size: %i, "
            "overlap-match weight: %f, left-trim: %i, right-trim: %i:\n", logIdentifier,
            (int) params->polishParams->chunkBoundary * 2,
            (float) overlapMatchWeight / PAIR_ALIGNMENT_PROB_1, pSeqRle->length - pSeqCropEnd, seqCropStart);
    // sanity check
    if (pSeqCropEnd > pSeqRle->length || seqCropStart < 0 || seqCropStart > seqRle->length) {
        // BUGFIX: logIdentifier was missing from the argument list, so the
        // five integers were shifted into the wrong conversions (%s consumed
        // an int64_t) — undefined behavior in the printf-style format.
        st_errAbort(" %s Got invalid crop points, expected pSeqEnd %"PRId64" <= pSeqLen %"PRId64", "
                    "seqStart %"PRId64" >= 0, seqStart %"PRId64" <= seqLen %"PRId64"\n",
                    logIdentifier, pSeqCropEnd, pSeqRle->length, seqCropStart, seqCropStart, seqRle->length);
    }
    // debug logging
    if (st_getLogLevel() >= info) {
        char *tmpSeq = getLargeNucleotideSequenceSummary(pSeqRle->rleString);
        st_logInfo(" %s pSeq: pSeqCropEnd:%7"PRId64", LenRLE:%7"PRId64", LenRAW:%7"PRId64", seq: %s\n",
                   logIdentifier, pSeqCropEnd, pSeqRle->length, pSeqRle->nonRleLength, tmpSeq);
        free(tmpSeq);
        tmpSeq = getLargeNucleotideSequenceSummary(seqRle->rleString);
        st_logInfo(" %s seq: seqCropStart:%7"PRId64", LenRLE:%7"PRId64", LenRAW:%7"PRId64", seq: %s\n",
                   logIdentifier, seqCropStart, seqRle->length, seqRle->nonRleLength, tmpSeq);
        free(tmpSeq);
    }
    // Trim the sequences
    // Crop the suffix of the previous sequence
    RleString *pSeqRleCropped = rleString_copySubstring(pSeqRle, 0, pSeqCropEnd);
    free(*pSeq);
    *pSeq = rleString_expand(pSeqRleCropped);
    // Crop the the prefix of the current chunk's sequence
    RleString *seqRleCropped = rleString_copySubstring(seqRle, seqCropStart, seqRle->length - seqCropStart);
    free(*seq);
    *seq = rleString_expand(seqRleCropped);
    // Trim the remaining stuff
    int64_t suffixRleTrimLength = pSeqRle->length - pSeqCropEnd;
    *lengthOfSequenceOutputSoFar += pSeqCropEnd;
    // debug logging
    if (st_getLogLevel() >= info) {
        char *tmpSeq = getLargeNucleotideSequenceSummary(pSeqRleCropped->rleString);
        st_logInfo(" %s pSeq TRIMMED: LenRLE:%7"PRId64", LenRAW:%7"PRId64", seq: %s\n",
                   logIdentifier, pSeqRleCropped->length, pSeqRleCropped->nonRleLength, tmpSeq);
        free(tmpSeq);
        tmpSeq = getLargeNucleotideSequenceSummary(seqRleCropped->rleString);
        st_logInfo(" %s seq TRIMMED: LenRLE:%7"PRId64", LenRAW:%7"PRId64", seq: %s\n",
                   logIdentifier, seqRleCropped->length, seqRleCropped->nonRleLength, tmpSeq);
        free(tmpSeq);
    }
    // Poa
    if (poa != NULL) {
        stList_removeInterval(pPoa, stList_length(pPoa) - suffixRleTrimLength, suffixRleTrimLength);
        stList_removeInterval(poa, 0, seqCropStart + 2);
        renumberCSVLines(poa, 1 + *lengthOfSequenceOutputSoFar);
    }
    // Repeat counts
    if (repeatCounts != NULL) {
        stList_removeInterval(pRepeatCounts, stList_length(pRepeatCounts) - suffixRleTrimLength, suffixRleTrimLength);
        stList_removeInterval(repeatCounts, 0, seqCropStart + 2);
        renumberCSVLines(repeatCounts, 1 + *lengthOfSequenceOutputSoFar);
    }
    // Cleanup
    free(logIdentifier);
    rleString_destruct(pSeqRle);
    rleString_destruct(pSeqRleCropped);
    rleString_destruct(seqRle);
    rleString_destruct(seqRleCropped);
    return overlapMatchWeight;
}
/*
 * Returns a heap-allocated, NUL-terminated string of 'N' characters of the
 * given length. Caller owns (and must free) the result.
 */
char *getRunOfNs(int64_t length) {
    // +1 for the NUL terminator; st_calloc zero-fills, so the terminator is already in place
    char *runOfNs = st_calloc(length + 1, sizeof(char));
    // Use int64_t for the index: the original `int` counter would truncate for lengths > INT_MAX
    for (int64_t i = 0; i < length; i++) {
        runOfNs[i] = 'N';
    }
    return runOfNs;
}
void chunkToStitch_trimAdjacentChunks(ChunkToStitch *pChunk, ChunkToStitch *chunk, Params *params,
                                      int64_t *lengthOfSequenceOutputSoFarHap1,
                                      int64_t *lengthOfSequenceOutputSoFarHap2) {
    /*
     * Trims the right end of pChunk and the left end of chunk so that they do not overlap, but are directly contiguous.
     * Haplotype 2 is only trimmed when the chunk actually carries a hap2 sequence.
     * (The original allocated a logIdentifier via getLogIdentifier() that was never used; removed.)
     */
    // Checks that they are part of the same sequence
    assert(stString_eq(pChunk->seqName, chunk->seqName));
    // Trim haplotype 1 sequences
    chunkToStitch_trimAdjacentChunks2(&pChunk->seqHap1, &chunk->seqHap1,
                                      pChunk->poaHap1StringsLines, chunk->poaHap1StringsLines,
                                      pChunk->repeatCountLinesHap1, chunk->repeatCountLinesHap1,
                                      params, lengthOfSequenceOutputSoFarHap1);
    // Trim haplotype 2 sequences, if it exists
    if (chunk->seqHap2 != NULL) {
        chunkToStitch_trimAdjacentChunks2(&pChunk->seqHap2, &chunk->seqHap2,
                                          pChunk->poaHap2StringsLines, chunk->poaHap2StringsLines,
                                          pChunk->repeatCountLinesHap2, chunk->repeatCountLinesHap2,
                                          params, lengthOfSequenceOutputSoFarHap2);
    }
}
/*
* OutputChunker
*/
typedef struct _outputChunker {
    /*
     * Object for managing the output of a polished sequence.
     * Each requested output is backed either by a file on disk or, when
     * useMemoryBuffers is set, by a POSIX memory stream (see open() below).
     */
    bool useMemoryBuffers; // If true, streams are open_memstream/fmemopen buffers rather than disk files
    // Sequence file
    bool outputSequence; // Whether sequence output was requested at all
    char *outputSequenceFile; // This is either the name of the file or the location in memory of the file buffer
    FILE *outputSequenceFileHandle; // Open stream; NULL when closed or not requested
    size_t outputSequenceFileBufferSize; // Used if in memory
    // Poa file
    bool outputPoa;
    char *outputPoaFile;
    FILE *outputPoaFileHandle;
    size_t outputPoaFileBufferSize; // Used if in memory
    // Repeat count file
    bool outputRepeatCounts;
    char *outputRepeatCountFile;
    FILE *outputRepeatCountFileHandle;
    size_t outputRepeatCountFileBufferSize; // Used if in memory
    // Read partition file - this must be specified if phasing is to be performed, as the
    // information is needed for stitching
    bool outputReadPartition;
    char *outputReadPartitionFile;
    FILE *outputReadPartitionFileHandle;
    size_t outputRepeatPartitionFileBufferSize; // NOTE(review): "Repeat" looks like a typo for "Read"; renaming would touch other code, so left as-is
    Params *params; // Not owned: outputChunker_destruct does not free it
} OutputChunker;
/*
 * Open one output stream, or return NULL when the output was not requested.
 * On disk this is a plain fopen; in memory, "w" creates a growable buffer
 * (open_memstream) and "r" re-reads a previously written buffer (fmemopen).
 */
static FILE *open(bool output, char **file, size_t *outputBufferSize, bool inMemory, char *openStr) {
    if (!output) {
        return NULL;
    }
    if (!inMemory) {
        return safe_fopen(*file, openStr);
    }
    if (stString_eq(openStr, "w")) {
        return open_memstream(file, outputBufferSize);
    }
    assert(stString_eq(openStr, "r"));
    // +1 so the buffer's trailing NUL written by open_memstream is readable
    return fmemopen(*file, (*outputBufferSize) + 1, "r");
}
void outputChunker_open(OutputChunker *outputChunker, char *openStr) {
    /*
     * Open the files (or in-memory streams) for every requested output, using
     * mode openStr ("w" to write, "r" to read back). Outputs that were not
     * requested get a NULL handle (see open() above).
     */
    outputChunker->outputSequenceFileHandle = open(outputChunker->outputSequence, &(outputChunker->outputSequenceFile),
                                                   &(outputChunker->outputSequenceFileBufferSize),
                                                   outputChunker->useMemoryBuffers, openStr);
    outputChunker->outputPoaFileHandle = open(outputChunker->outputPoa, &(outputChunker->outputPoaFile),
                                              &(outputChunker->outputPoaFileBufferSize),
                                              outputChunker->useMemoryBuffers, openStr);
    outputChunker->outputRepeatCountFileHandle = open(outputChunker->outputRepeatCounts,
                                                      &(outputChunker->outputRepeatCountFile),
                                                      &(outputChunker->outputRepeatCountFileBufferSize),
                                                      outputChunker->useMemoryBuffers, openStr);
    // Note the buffer-size field name says "Repeat" but belongs to the read partition output
    outputChunker->outputReadPartitionFileHandle = open(outputChunker->outputReadPartition,
                                                        &(outputChunker->outputReadPartitionFile),
                                                        &(outputChunker->outputRepeatPartitionFileBufferSize),
                                                        outputChunker->useMemoryBuffers, openStr);
}
OutputChunker *
outputChunker_construct(Params *params, char *outputSequenceFile, char *outputPoaFile, char *outputReadPartitionFile,
                        char *outputRepeatCountFile) {
    /*
     * Create an OutputChunker backed by files on disk, ready to write chunks of
     * output. A NULL filename disables the corresponding output.
     */
    OutputChunker *chunker = st_calloc(1, sizeof(OutputChunker));
    chunker->useMemoryBuffers = 0;
    chunker->params = params;
    // Each output is enabled exactly when its filename was supplied
    chunker->outputSequenceFile = outputSequenceFile;
    chunker->outputSequence = outputSequenceFile != NULL;
    chunker->outputPoaFile = outputPoaFile;
    chunker->outputPoa = outputPoaFile != NULL;
    chunker->outputRepeatCountFile = outputRepeatCountFile;
    chunker->outputRepeatCounts = outputRepeatCountFile != NULL;
    chunker->outputReadPartitionFile = outputReadPartitionFile;
    chunker->outputReadPartition = outputReadPartitionFile != NULL;
    // Open the requested files for writing
    outputChunker_open(chunker, "w");
    return chunker;
}
OutputChunker *
outputChunker_constructInMemory(Params *params, bool outputSequence, bool outputPoaFile, bool outputReadPartitionFile,
                                bool outputRepeatCountFile) {
    /*
     * Create an OutputChunker backed by in-memory streams rather than files on
     * disk; the boolean flags select which outputs are produced.
     */
    OutputChunker *chunker = st_calloc(1, sizeof(OutputChunker));
    chunker->useMemoryBuffers = 1;
    chunker->params = params;
    // Buffer pointers/sizes are populated when the streams are opened below
    chunker->outputSequence = outputSequence;
    chunker->outputPoa = outputPoaFile;
    chunker->outputReadPartition = outputReadPartitionFile;
    chunker->outputRepeatCounts = outputRepeatCountFile;
    // Open the requested in-memory streams for writing
    outputChunker_open(chunker, "w");
    return chunker;
}
void
outputChunker_processChunkSequence(OutputChunker *outputChunker, int64_t chunkOrdinal, char *sequenceName, Poa *poa,
                                   stList *reads) {
    /*
     * Write one unphased chunk (sequence, POA CSV and repeat-count CSV, each as
     * configured) to this chunker's output streams.
     */
    // Header prefix identifying the chunk: "<sequenceName>,<chunkOrdinal>,"
    char *chunkHeader = stString_print("%s,%" PRIi64 ",", sequenceName, chunkOrdinal);
    FILE *seqOut = outputChunker->outputSequenceFileHandle;
    if (seqOut != NULL) {
        // Run-length decode and emit the whole sequence on a single line
        char *rawSeq = rleString_expand(poa->refString);
        fprintf(seqOut, "%s1\n%s\n", chunkHeader, rawSeq);
        free(rawSeq);
    }
    FILE *poaOut = outputChunker->outputPoaFileHandle;
    if (poaOut != NULL) {
        fprintf(poaOut, "%s%" PRIi64 "\n", chunkHeader, stList_length(poa->nodes) + 1);
        poa_printCSV(poa, poaOut, reads, outputChunker->params->polishParams->repeatSubMatrix, 5);
    }
    FILE *repeatCountOut = outputChunker->outputRepeatCountFileHandle;
    if (repeatCountOut != NULL) {
        fprintf(repeatCountOut, "%s%" PRIi64 "\n", chunkHeader, stList_length(poa->nodes) + 1);
        poa_printRepeatCountsCSV(poa, repeatCountOut, reads);
    }
    free(chunkHeader);
}
void
outputChunker_processChunkSequencePhased2(OutputChunker *outputChunker, char *headerLinePrefix,
                                          Poa *poa, stList *reads, stSet *readsBelongingToHap1,
                                          stSet *readsBelongingToHap2) {
    /*
     * Write one haplotype's chunk outputs (sequence, phased POA CSV and
     * repeat-count CSV, each as configured) using the supplied header prefix.
     */
    FILE *seqOut = outputChunker->outputSequenceFileHandle;
    if (seqOut != NULL) {
        char *rawSeq = rleString_expand(poa->refString); // Run-length decode
        fprintf(seqOut, "%s1\n%s\n", headerLinePrefix, rawSeq);
        free(rawSeq);
    }
    FILE *poaOut = outputChunker->outputPoaFileHandle;
    if (poaOut != NULL) {
        fprintf(poaOut, "%s%" PRIi64 "\n", headerLinePrefix, stList_length(poa->nodes) + 1);
        poa_printPhasedCSV(poa, poaOut, reads, readsBelongingToHap1, readsBelongingToHap2,
                           outputChunker->params->polishParams->repeatSubMatrix, 5);
    }
    FILE *repeatCountOut = outputChunker->outputRepeatCountFileHandle;
    if (repeatCountOut != NULL) {
        fprintf(repeatCountOut, "%s%" PRIi64 "\n", headerLinePrefix, stList_length(poa->nodes) + 1);
        poa_printRepeatCountsCSV(poa, repeatCountOut, reads);
    }
}
void outputChunker_processChunkSequencePhased(OutputChunker *outputChunker, int64_t chunkOrdinal, char *sequenceName,
                                              Poa *poaHap1, Poa *poaHap2, stList *reads, stSet *readsBelongingToHap1,
                                              stSet *readsBelongingToHap2, stGenomeFragment *gF, Params *params) {
    /*
     * Write one phased chunk: both haplotype consensus outputs, then the read
     * partition (per-read phasing assignments) for each haplotype.
     * NOTE(review): writes to outputReadPartitionFileHandle without a NULL check;
     * the struct comment says the partition file must be specified when phasing
     * is performed -- confirm against callers.
     */
    // Create chunk name
    char *headerLinePrefix = stString_print("%s,%" PRIi64 ",", sequenceName, chunkOrdinal);
    // Hap1 outputs, then hap2 with the two haplotype read sets swapped
    outputChunker_processChunkSequencePhased2(outputChunker, headerLinePrefix,
                                              poaHap1, reads, readsBelongingToHap1, readsBelongingToHap2);
    outputChunker_processChunkSequencePhased2(outputChunker, headerLinePrefix,
                                              poaHap2, reads, readsBelongingToHap2, readsBelongingToHap1);
    // because we (may) have filtered reads now: readsBelongingToHapX has more reads than GF
    stSet *readIdsInGfHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, free);
    stSet *readIdsInGfHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, free);
    BamChunkRead *read = NULL;
    stSetIterator *itor = NULL;
    // Output the read partition hap1 (count is set size + 1, presumably for the CSV header row -- confirm)
    fprintf(outputChunker->outputReadPartitionFileHandle, "%s%" PRIi64 "\n", headerLinePrefix,
            stSet_size(readsBelongingToHap1) + 1);
    stGenomeFragment_printPartitionAsCSV(gF, outputChunker->outputReadPartitionFileHandle, params->phaseParams, 1,
                                         readIdsInGfHap1);
    // Reads assigned to hap1 but absent from the genome fragment get a sentinel score of -1
    itor = stSet_getIterator(readsBelongingToHap1);
    while ((read = stSet_getNext(itor)) != NULL) {
        if (stSet_search(readIdsInGfHap1, read->readName) == NULL) {
            fprintf(outputChunker->outputReadPartitionFileHandle, "%s,%f\n", read->readName, -1.0);
        }
    }
    stSet_destructIterator(itor);
    // Output the read partition hap2 (same scheme as hap1)
    fprintf(outputChunker->outputReadPartitionFileHandle, "%s%" PRIi64 "\n", headerLinePrefix,
            stSet_size(readsBelongingToHap2) + 1);
    stGenomeFragment_printPartitionAsCSV(gF, outputChunker->outputReadPartitionFileHandle, params->phaseParams, 0,
                                         readIdsInGfHap2);
    itor = stSet_getIterator(readsBelongingToHap2);
    while ((read = stSet_getNext(itor)) != NULL) {
        if (stSet_search(readIdsInGfHap2, read->readName) == NULL) {
            fprintf(outputChunker->outputReadPartitionFileHandle, "%s,%f\n", read->readName, -1.0);
        }
    }
    stSet_destructIterator(itor);
    fflush(outputChunker->outputReadPartitionFileHandle);
    // Cleanup
    free(headerLinePrefix);
    stSet_destruct(readIdsInGfHap1);
    stSet_destruct(readIdsInGfHap2);
}
ChunkToStitch *outputChunker_readChunk(OutputChunker *outputChunker, bool phased) {
    /*
     * Read the next chunk of output back from this chunker's streams, or return
     * NULL when no chunks remain.
     */
    ChunkToStitch *chunk = st_calloc(1, sizeof(ChunkToStitch));
    bool haveSequenceFile = outputChunker->outputSequenceFile != NULL;
    // Primary "end of chunk" determinator
    if (haveSequenceFile && !chunkToStitch_readSequenceChunk(outputChunker->outputSequenceFileHandle, chunk, phased)) {
        free(chunk);
        return NULL;
    }
    if (phased) {
        // Secondary "end of chunk" determinator. Used when we are not outputting sequence (for the very fast)
        if (!chunkToStitch_readReadPhasingChunk(outputChunker->outputReadPartitionFileHandle, chunk)) {
            if (haveSequenceFile) {
                // The sequence stream produced a chunk, so phasing info must exist too
                st_errAbort("Expected chunk phasing info but found none! Expected chunk %"PRId64, chunk->chunkOrdinal);
            }
            free(chunk);
            return NULL;
        }
    }
    if (outputChunker->outputPoaFile != NULL) {
        chunkToStitch_readPoaChunk(outputChunker->outputPoaFileHandle, chunk, phased);
    }
    if (outputChunker->outputRepeatCountFile != NULL) {
        chunkToStitch_readRepeatCountChunk(outputChunker->outputRepeatCountFileHandle, chunk, phased);
    }
    return chunk;
}
// Close a stream if it is open and NULL-out the handle so a later close is a no-op
static void closeFileHandle(FILE **fileHandle) {
    if (*fileHandle != NULL) {
        fclose(*fileHandle);
        *fileHandle = NULL;
    }
}
void outputChunker_close(OutputChunker *outputChunker) {
    /*
     * Close any open output streams (sequence, repeat count, POA, read partition).
     */
    closeFileHandle(&outputChunker->outputSequenceFileHandle);
    closeFileHandle(&outputChunker->outputRepeatCountFileHandle);
    closeFileHandle(&outputChunker->outputPoaFileHandle);
    closeFileHandle(&outputChunker->outputReadPartitionFileHandle);
}
void outputChunker_closeAndDeleteFiles(OutputChunker *outputChunker) {
    /*
     * Closes the file streams and removes the output files (used for
     * chunker output to temporary files).
     */
    outputChunker_close(outputChunker); // Closes file streams
    if (outputChunker->useMemoryBuffers) {
        // In-memory buffers are freed by the destructor, nothing to delete on disk
        return;
    }
    // Remove every temporary file that was actually created
    char *outputFiles[] = {outputChunker->outputSequenceFile,
                           outputChunker->outputRepeatCountFile,
                           outputChunker->outputPoaFile,
                           outputChunker->outputReadPartitionFile};
    for (int64_t i = 0; i < 4; i++) {
        if (outputFiles[i] != NULL) {
            stFile_rmrf(outputFiles[i]);
        }
    }
}
/*
 * Print each string in the list to the stream, one per line.
 */
void writeLines(FILE *fh, stList *lines) {
    int64_t lineCount = stList_length(lines);
    for (int64_t lineIdx = 0; lineIdx < lineCount; lineIdx++) {
        char *line = stList_get(lines, lineIdx);
        fprintf(fh, "%s\n", line);
    }
}
void outputChunker_writeChunkToFinalOutput(OutputChunker *outputChunker,
                                           char *seqName, char *seq, stList *poaLines, stList *repeatCountLines,
                                           bool startOfSequence) {
    /*
     * Append one stitched chunk to the final output files. At the start of a
     * new sequence the fasta header is emitted; otherwise the sequence text is
     * simply continued.
     */
    if (outputChunker->outputSequenceFile != NULL) {
        FILE *seqOut = outputChunker->outputSequenceFileHandle;
        if (startOfSequence) {
            fastaWrite(seq, seqName, seqOut);
        } else {
            fprintf(seqOut, "%s\n", seq);
        }
    }
    if (outputChunker->outputPoaFile != NULL) {
        writeLines(outputChunker->outputPoaFileHandle, poaLines);
    }
    if (outputChunker->outputRepeatCountFile != NULL) {
        writeLines(outputChunker->outputRepeatCountFileHandle, repeatCountLines);
    }
}
void outputChunker_destruct(OutputChunker *outputChunker) {
    /*
     * Release an OutputChunker: closes any open streams and frees the owned
     * filename/buffer strings, then the struct itself. The params pointer is
     * not owned and is left alone.
     */
    // Close any open file descriptors
    outputChunker_close(outputChunker);
    // free(NULL) is a defined no-op, so the original per-field NULL guards were redundant
    free(outputChunker->outputSequenceFile);
    free(outputChunker->outputRepeatCountFile);
    free(outputChunker->outputPoaFile);
    free(outputChunker->outputReadPartitionFile);
    // Cleanup residual
    free(outputChunker);
}
/*
* OutputChunkers
*/
struct _outputChunkers {
    int64_t noOfOutputChunkers; // Number of parallel temporary chunkers
    stList *tempFileChunkers; // Of OutputChunker: per-worker temporary chunk output
    int64_t tempFileChunkerCounter; // Round-robin index used when reading chunks back
    int64_t chunkOrderNo; // Ordinal of the next chunk expected when stitching in order
    OutputChunker *outputChunkerHap1; // Final output files for haplotype 1
    OutputChunker *outputChunkerHap2; // Final output files for haplotype 2; NULL when unphased
    Params *params;
};
/*
 * Derive the indexed temporary-file name "<fileName>.<index>.temp", or NULL
 * when no base name was given. Returns a newly allocated string.
 */
static char *printTempFileName(char *fileName, int64_t index) {
    return fileName == NULL ? NULL : stString_print("%s.%" PRIi64 ".temp", fileName, index);
}
/*
 * Derive the final output file name "<fileName><suffix>" (suffix may be NULL),
 * or NULL when no base name was given. Returns a newly allocated string.
 */
static char *printFinalFileName(char *fileName, char *suffix) {
    if (fileName == NULL) {
        return NULL;
    }
    const char *effectiveSuffix = (suffix == NULL) ? "" : suffix;
    return stString_print("%s%s", fileName, effectiveSuffix);
}
OutputChunkers *
outputChunkers_construct(int64_t noOfOutputChunkers, Params *params, char *outputSequenceFile, char *outputPoaFile,
                         char *outputReadPartitionFile, char *outputRepeatCountFile, char *hap1Suffix, char *hap2Suffix,
                         bool inMemoryBuffers) {
    /*
     * Build the output machinery: noOfOutputChunkers temporary chunkers (in-memory
     * or temp files) plus final output chunkers for hap1 and, when hap2Suffix is
     * given, hap2. File names are derived from the bases and suffixes.
     */
    char *outputReadPartitionFileForStitching = NULL;
    if (hap2Suffix != NULL) {
        if (stString_eq(hap1Suffix, hap2Suffix)) {
            st_errAbort("Hap1 and hap2 suffixes are identical, can not open distinct files for output\n");
        }
        // Make temporary read phasing file if not specified -- the partition info is required for stitching phased output
        //TODO if inMemoryBuffers and !shouldOutputReadPartition, this results in shouldOutputReadPartition
        if (outputReadPartitionFile == NULL) {
            outputReadPartitionFileForStitching = "temp_read_phasing_file.csv";
            st_logInfo("> Making a temporary file to store read phasing in: %s\n", outputReadPartitionFileForStitching);
        } else {
            outputReadPartitionFileForStitching = outputReadPartitionFile;
        }
    } else {
        if (outputReadPartitionFile != NULL) {
            st_errAbort("Hap2 not specified but trying to output read partition\n");
        }
    }
    if (inMemoryBuffers) {
        st_logInfo("> Saving temporary data to in-memory buffers.\n");
    }
    OutputChunkers *outputChunkers = st_calloc(1, sizeof(OutputChunkers));
    outputChunkers->noOfOutputChunkers = noOfOutputChunkers;
    outputChunkers->params = params;
    // Make the temporary, parallel chunkers; temp file names get a ".<i>.temp" suffix per chunker
    outputChunkers->tempFileChunkers = stList_construct3(0, (void (*)(void *)) outputChunker_destruct);
    for (int64_t i = 0; i < noOfOutputChunkers; i++) {
        stList_append(outputChunkers->tempFileChunkers, inMemoryBuffers ?
                      outputChunker_constructInMemory(params, outputSequenceFile != NULL, outputPoaFile != NULL,
                                                      outputReadPartitionFileForStitching != NULL, outputRepeatCountFile != NULL) :
                      outputChunker_construct(params, printTempFileName(outputSequenceFile, i), printTempFileName(outputPoaFile, i),
                                              printTempFileName(outputReadPartitionFileForStitching, i), printTempFileName(outputRepeatCountFile, i)));
    }
    // Make the final output chunkers; the printFinalFileName results are owned (and later freed) by the chunkers
    outputChunkers->outputChunkerHap1 = outputChunker_construct(params,
                                                                printFinalFileName(outputSequenceFile, hap1Suffix),
                                                                printFinalFileName(outputPoaFile, hap1Suffix),
                                                                printFinalFileName(outputReadPartitionFile, hap1Suffix),
                                                                printFinalFileName(outputRepeatCountFile, hap1Suffix));
    if (hap2Suffix != NULL) {
        outputChunkers->outputChunkerHap2 = outputChunker_construct(params,
                                                                    printFinalFileName(outputSequenceFile, hap2Suffix),
                                                                    printFinalFileName(outputPoaFile, hap2Suffix),
                                                                    printFinalFileName(outputReadPartitionFile,
                                                                                       hap2Suffix),
                                                                    printFinalFileName(outputRepeatCountFile,
                                                                                       hap2Suffix));
    }
    return outputChunkers;
}
void outputChunkers_processChunkSequence(OutputChunkers *outputChunkers, int64_t chunker, int64_t chunkOrdinal,
                                         char *sequenceName, Poa *poa,
                                         stList *reads) {
    /*
     * Dispatch an unphased polished chunk to the chunker-th temporary chunker.
     */
    OutputChunker *tempChunker = stList_get(outputChunkers->tempFileChunkers, chunker);
    outputChunker_processChunkSequence(tempChunker, chunkOrdinal, sequenceName, poa, reads);
}
void outputChunkers_processChunkSequencePhased(OutputChunkers *outputChunkers, int64_t chunker, int64_t chunkOrdinal,
                                               char *sequenceName, Poa *poaHap1, Poa *poaHap2, stList *reads,
                                               stSet *readsBelongingToHap1, stSet *readsBelongingToHap2,
                                               stGenomeFragment *gF, Params *params) {
    /*
     * Dispatch a phased polished chunk to the chunker-th temporary chunker.
     */
    OutputChunker *tempChunker = stList_get(outputChunkers->tempFileChunkers, chunker);
    outputChunker_processChunkSequencePhased(tempChunker, chunkOrdinal, sequenceName, poaHap1, poaHap2, reads,
                                             readsBelongingToHap1, readsBelongingToHap2, gF, params);
}
void outputChunkers_close(OutputChunkers *outputChunkers) {
    /*
     * Closes the file handles of the temporary chunkers and both final chunkers.
     */
    int64_t tempChunkerCount = stList_length(outputChunkers->tempFileChunkers);
    for (int64_t i = 0; i < tempChunkerCount; i++) {
        outputChunker_close(stList_get(outputChunkers->tempFileChunkers, i));
    }
    outputChunker_close(outputChunkers->outputChunkerHap1);
    if (outputChunkers->outputChunkerHap2 != NULL) {
        outputChunker_close(outputChunkers->outputChunkerHap2);
    }
}
void outputChunkers_openForStitching(OutputChunkers *outputChunkers) {
    /*
     * Re-open the chunkers for the stitching phase: temporary chunkers are opened
     * for reading their chunks back, the final chunkers for writing.
     */
    outputChunkers_close(outputChunkers); // Flush and close everything first
    int64_t tempChunkerCount = stList_length(outputChunkers->tempFileChunkers);
    for (int64_t i = 0; i < tempChunkerCount; i++) {
        outputChunker_open(stList_get(outputChunkers->tempFileChunkers, i), "r");
    }
    outputChunker_open(outputChunkers->outputChunkerHap1, "w");
    if (outputChunkers->outputChunkerHap2 != NULL) {
        outputChunker_open(outputChunkers->outputChunkerHap2, "w");
    }
}
/*
 * Round-robin over the temporary chunkers, returning the first chunk read, or
 * NULL once every chunker has been polled without producing one.
 */
static ChunkToStitch *outputChunkers_readChunk(OutputChunkers *outputChunkers, bool phased) {
    int64_t chunkerCount = stList_length(outputChunkers->tempFileChunkers);
    for (int64_t attempt = 0; attempt < chunkerCount; attempt++) {
        int64_t idx = outputChunkers->tempFileChunkerCounter++ % chunkerCount;
        OutputChunker *tempChunker = stList_get(outputChunkers->tempFileChunkers, idx);
        ChunkToStitch *chunk = outputChunker_readChunk(tempChunker, phased);
        if (chunk != NULL) {
            return chunk;
        }
    }
    return NULL;
}
static ChunkToStitch *outputChunkers_getNextChunkInSequence(OutputChunkers *outputChunkers,
                                                            stSortedSet *orderedChunks, bool phased) {
    /*
     * Return the chunk with the next expected ordinal (chunkOrderNo), reading
     * chunks off the temporary chunkers and buffering out-of-order ones in
     * orderedChunks until the expected one appears. Returns NULL when all
     * chunks have been consumed; aborts if an ordinal is missing.
     */
    ChunkToStitch *chunk;
    while (1) {
        chunk = outputChunkers_readChunk(outputChunkers, phased);
        if (chunk == NULL) {
            // Nothing left to read: drain the buffer, which must now hold the expected ordinal
            if (stSortedSet_size(orderedChunks) == 0) {
                return NULL;
            }
            chunk = stSortedSet_getFirst(orderedChunks);
            if (chunk->chunkOrdinal != outputChunkers->chunkOrderNo) {
                st_errAbort("Did not retrieve all the chunks from the temporary output");
            }
            break;
        }
        // Buffer the newly read chunk, then check whether the smallest buffered
        // ordinal is the one we are waiting for
        stSortedSet_insert(orderedChunks, chunk);
        chunk = stSortedSet_getFirst(orderedChunks);
        if (chunk->chunkOrdinal == outputChunkers->chunkOrderNo) {
            break;
        }
    }
    // Hand the chunk over and advance the expected ordinal
    stSortedSet_remove(orderedChunks, chunk);
    outputChunkers->chunkOrderNo++;
    return chunk;
}
void outputChunkers_writeChunk(OutputChunkers *outputChunkers, ChunkToStitch *chunk) {
    /*
     * Writes the chunk to the final output files: hap1 always, hap2 when a
     * second-haplotype chunker exists.
     */
    outputChunker_writeChunkToFinalOutput(outputChunkers->outputChunkerHap1, chunk->seqName, chunk->seqHap1,
                                          chunk->poaHap1StringsLines, chunk->repeatCountLinesHap1,
                                          chunk->startOfSequence);
    OutputChunker *hap2Chunker = outputChunkers->outputChunkerHap2;
    if (hap2Chunker != NULL) {
        outputChunker_writeChunkToFinalOutput(hap2Chunker, chunk->seqName, chunk->seqHap2,
                                              chunk->poaHap2StringsLines, chunk->repeatCountLinesHap2,
                                              chunk->startOfSequence);
    }
}
void writeReadPartition(stHash *readsInHap, FILE *fh) {
    /*
     * Write out the reads for a haplotype in the given file, as a CSV of read
     * name and score.
     */
    fprintf(fh, "READ_NAME,PHRED_SCORE_OF_BEING_IN_PARTITION\n");
    stHashIterator *iterator = stHash_getIterator(readsInHap);
    for (char *readName = stHash_getNext(iterator); readName != NULL; readName = stHash_getNext(iterator)) {
        double *score = stHash_search(readsInHap, readName);
        fprintf(fh, "%s,%f\n", readName, *score);
    }
    stHash_destructIterator(iterator);
}
void outputChunkers_stitchLinear(OutputChunkers *outputChunkers, bool phased, Params *params) {
    /*
     * Stitch together the outputs using a single thread, but very minimal memory:
     * chunks are streamed in ordinal order, each pair of adjacent chunks is
     * phase-reconciled and overlap-trimmed, and the previous chunk is written
     * out before the next is processed.
     */
    // Setup to write out the chunks
    outputChunkers_openForStitching(outputChunkers);
    // Create a cache to hold the chunks, ordered by their ordinal
    stSortedSet *orderedChunks = stSortedSet_construct3((int (*)(const void *, const void *)) chunkToStitch_cmp, NULL);
    // Get the first chunk
    ChunkToStitch *pChunk = outputChunkers_getNextChunkInSequence(outputChunkers, orderedChunks, phased), *chunk;
    if (pChunk == NULL) {
        // Nothing to do
        stSortedSet_destruct(orderedChunks);
        return;
    }
    // Length of the output sequences, per haplotype, reset at each new contig
    int64_t lengthOfSequenceOutputSoFarHap1 = 0;
    int64_t lengthOfSequenceOutputSoFarHap2 = 0;
    // Track the names of the reads in the two haplotypes, if phased
    stHash *hap1Reads = NULL, *hap2Reads = NULL;
    if (phased) {
        hap1Reads = getReadNames(pChunk->readsHap1Lines);
        hap2Reads = getReadNames(pChunk->readsHap2Lines);
    }
    // Indicate we're at the beginning of a sequence
    pChunk->startOfSequence = 1;
    // Get each successive chunk and stitch and phase progressively
    while ((chunk = outputChunkers_getNextChunkInSequence(outputChunkers, orderedChunks, phased)) != NULL) {
        assert(pChunk != NULL);
        // If phased, ensure the chunks phasing is consistent
        if (phased) {
            chunkToStitch_phaseAdjacentChunks(chunk, hap1Reads, hap2Reads, params);
        }
        // Set the flag determining if this is the start of a new sequence
        chunk->startOfSequence = !stString_eq(pChunk->seqName, chunk->seqName);
        if (chunk->startOfSequence) { // Reset the lengths of the new sequence output
            lengthOfSequenceOutputSoFarHap1 = 0;
            lengthOfSequenceOutputSoFarHap2 = 0;
        } else { // Trim the boundaries of the two chunks so that they don't overlap
            chunkToStitch_trimAdjacentChunks(pChunk, chunk, outputChunkers->params,
                                             &lengthOfSequenceOutputSoFarHap1, &lengthOfSequenceOutputSoFarHap2);
        }
        // Write out the pChunk
        outputChunkers_writeChunk(outputChunkers, pChunk);
        // Cleanup the pChunk
        chunkToStitch_destruct(pChunk);
        // Set the new pChunk
        pChunk = chunk;
    }
    // Write out the pChunk (the final chunk)
    outputChunkers_writeChunk(outputChunkers, pChunk);
    // Cleanup the pChunk
    chunkToStitch_destruct(pChunk);
    // Write out the read name phasing, if needed
    // NOTE(review): assumes outputChunkerHap2 is non-NULL whenever phased -- confirm against callers
    if (phased) {
        writeReadPartition(hap1Reads, outputChunkers->outputChunkerHap1->outputReadPartitionFileHandle);
        writeReadPartition(hap2Reads, outputChunkers->outputChunkerHap2->outputReadPartitionFileHandle);
    }
    // Cleanup
    if (stSortedSet_size(orderedChunks) != 0) {
        st_errAbort("Got chunks left over after writing out chunks");
    }
    stSortedSet_destruct(orderedChunks);
    if (phased) {
        stHash_destruct(hap1Reads);
        stHash_destruct(hap2Reads);
    }
}
void updateStitchingChunk(ChunkToStitch *stitched, ChunkToStitch *pChunk, stList *hap1Seqs, stList *hap2Seqs,
                          bool phased, bool trackPoa, bool trackRepeatCounts) {
    /*
     * Fold pChunk's sequences, POA lines and repeat-count lines into the
     * accumulating "stitched" chunk. Line lists are transferred by moving the
     * elements and clearing the donor list's destructor, so the strings are not
     * double-freed when pChunk is destructed.
     */
    // for very fast case where we don't write fasta out, sanity check error (this should never happen)
    // NOTE(review): guard only fires when BOTH lists are NULL; a NULL hap1Seqs with a
    // non-NULL hap2Seqs would crash at the append below -- presumably impossible by
    // construction in the callers, but worth confirming
    if (hap1Seqs == NULL && hap2Seqs == NULL) {
        st_errAbort(" %s Encountered null sequences when updating stitching chunks!", getLogIdentifier());
    }
    stList_append(hap1Seqs, stString_copy(pChunk->seqHap1));
    if (phased) {
        stList_append(hap2Seqs, stString_copy(pChunk->seqHap2));
    }
    if (trackPoa) {
        // Move ownership of the POA lines into the stitched chunk
        stList_appendAll(stitched->poaHap1StringsLines, pChunk->poaHap1StringsLines);
        stList_setDestructor(pChunk->poaHap1StringsLines, NULL);
        if (phased) {
            stList_appendAll(stitched->poaHap2StringsLines, pChunk->poaHap2StringsLines);
            stList_setDestructor(pChunk->poaHap2StringsLines, NULL);
        }
    }
    if (trackRepeatCounts) {
        // Move ownership of the repeat-count lines into the stitched chunk
        stList_appendAll(stitched->repeatCountLinesHap1, pChunk->repeatCountLinesHap1);
        stList_setDestructor(pChunk->repeatCountLinesHap1, NULL);
        if (phased) {
            stList_appendAll(stitched->repeatCountLinesHap2, pChunk->repeatCountLinesHap2);
            stList_setDestructor(pChunk->repeatCountLinesHap2, NULL);
        }
    }
}
void convertReadPartitionToLines(stHash *readsInHap, stList *readPartitionLines) {
    /*
     * Format the output of the reads for a haplotype, appending one CSV line
     * (plus a header) per read to the given list.
     */
    stList_append(readPartitionLines, stString_print("READ_NAME,LOG_PROB_OF_BEING_IN_PARTITION\n"));
    stHashIterator *iterator = stHash_getIterator(readsInHap);
    for (char *readName = stHash_getNext(iterator); readName != NULL; readName = stHash_getNext(iterator)) {
        double *logProb = stHash_search(readsInHap, readName);
        stList_append(readPartitionLines, stString_print("%s,%f\n", readName, *logProb));
    }
    stHash_destructIterator(iterator);
}
ChunkToStitch *mergeContigChunkz(ChunkToStitch **chunks, int64_t startIdx, int64_t endIdxExclusive, bool phased,
                                 Params *params) {
    /*
     * Merge the chunks in [startIdx, endIdxExclusive) into a single stitched
     * ChunkToStitch: adjacent chunks are phase-reconciled (if phased) and
     * overlap-trimmed, then their sequences and CSV lines are concatenated.
     * Caller owns the returned chunk.
     */
    // for logging
    char *logIdentifier = getLogIdentifier();
    time_t stitchStart = time(NULL);
    st_logInfo(">%s Stitching chunks from index [%"PRId64" to %"PRId64")\n", logIdentifier, startIdx, endIdxExclusive);
    // Get the first chunk
    ChunkToStitch *pChunk = chunks[startIdx];
    ChunkToStitch *chunk = NULL;
    // Length of the output sequences
    int64_t lengthOfSequenceOutputSoFarHap1 = 0;
    int64_t lengthOfSequenceOutputSoFarHap2 = 0;
    // Our "stitched" chunk; what to track is inferred from what the first chunk carries
    bool trackSequence = pChunk->seqHap1 != NULL;
    bool trackRepeatCounts = pChunk->repeatCountLinesHap1 != NULL;
    bool trackPoa = pChunk->poaHap1StringsLines != NULL;
    ChunkToStitch *stitched = chunkToStitch_construct(NULL, -1 * pChunk->chunkOrdinal, phased, trackRepeatCounts, trackPoa);
    // Lists to keep track of haplotype strings
    stList *hap1Seqs = (trackSequence ? stList_construct3(0, free) : NULL);
    stList *hap2Seqs = (phased && trackSequence ? stList_construct3(0, free) : NULL);
    // Track the names of the reads in the two haplotypes, if phased
    // (initialized to NULL so the variables are never read uninitialized)
    stHash *hap1Reads = NULL, *hap2Reads = NULL;
    if (phased) {
        hap1Reads = getReadNames(pChunk->readsHap1Lines);
        hap2Reads = getReadNames(pChunk->readsHap2Lines);
    }
    // Get each successive chunk and stitch and phase progressively
    for (int64_t chunkIndex = startIdx + 1; chunkIndex < endIdxExclusive; chunkIndex++) {
        assert(pChunk != NULL);
        chunk = chunks[chunkIndex];
        st_logInfo(">%s Stitching chunk %"PRId64" and %"PRId64"\n", logIdentifier,
                   pChunk->chunkOrdinal, chunk->chunkOrdinal);
        // If phased, ensure the chunks phasing is consistent
        if (phased) {
            chunkToStitch_phaseAdjacentChunks(chunk, hap1Reads, hap2Reads, params);
        }
        // handles the case where we're not tracking sequences (for very fast)
        if (trackSequence) {
            // Trim the overlap between chunks
            chunkToStitch_trimAdjacentChunks(pChunk, chunk, params,
                                             &lengthOfSequenceOutputSoFarHap1, &lengthOfSequenceOutputSoFarHap2);
            // Save to stitched
            updateStitchingChunk(stitched, pChunk, hap1Seqs, hap2Seqs, phased, trackPoa, trackRepeatCounts);
        }
        // Set the new pChunk
        pChunk = chunk;
    }
    // save the last chunk and the read phasing
    if (trackSequence) {
        updateStitchingChunk(stitched, pChunk, hap1Seqs, hap2Seqs, phased, trackPoa, trackRepeatCounts);
    }
    stitched->seqHap1 = trackSequence ? stString_join2("", hap1Seqs) : NULL;
    if (phased) {
        stitched->seqHap2 = trackSequence ? stString_join2("", hap2Seqs) : NULL;
        // Save back the reads in each haplotype to the stitched chunk
        convertReadPartitionToLines(hap1Reads, stitched->readsHap1Lines);
        convertReadPartitionToLines(hap2Reads, stitched->readsHap2Lines);
    }
    // cleanup
    if (trackSequence) stList_destruct(hap1Seqs);
    if (phased) {
        stList_destruct(hap2Seqs);
        stHash_destruct(hap1Reads);
        stHash_destruct(hap2Reads);
    }
    // loggit -- BUGFIX: the elapsed time is computed as time_t then cast to int;
    // the original `(int) time(NULL) - stitchStart` passed a time_t to %d (UB)
    st_logInfo(" %s Finished stitching %"PRId64" chunks in %ds\n", logIdentifier, endIdxExclusive - startIdx,
               (int) (time(NULL) - stitchStart));
    free(logIdentifier);
    // fin
    return stitched;
}
//TODO this is currently unused
// refactored to not use this function. we now multithread per contig and stitch all chunks linearly within contigs
// this function is kept here in case we revert back, but can eventually be removed.
ChunkToStitch *mergeContigChunkzThreaded(ChunkToStitch **chunks, int64_t startIdx, int64_t endIdxExclusive, int64_t numThreads,
                                         bool phased, Params *params, char *referenceSequenceName) {
    /*
     * Merge the chunks in [startIdx, endIdxExclusive) in parallel: split the range
     * evenly across numThreads, merge each slice with mergeContigChunkz, then merge
     * the per-thread results, and propagate any phasing switches back to the
     * original chunks.
     */
    // special unthreaded case
    if (numThreads == 1) return mergeContigChunkz(chunks, startIdx, endIdxExclusive, phased, params);
    // divide into chunks
    int64_t totalChunks = endIdxExclusive - startIdx;
    int64_t chunksPerThread = (int64_t) ceil(1.0 * totalChunks / numThreads);
    // Drop threads that would get an empty slice
    while (startIdx + chunksPerThread * (numThreads - 1) >= endIdxExclusive) {numThreads--;}
    ChunkToStitch **outputChunks = st_calloc(numThreads, sizeof(ChunkToStitch*));
    // multithread loop
    st_logInfo("  Merging chunks for %s from (%"PRId64", %"PRId64"] with %"PRId64" chunks per thread on %"PRId64" threads \n",
               referenceSequenceName, startIdx, endIdxExclusive, chunksPerThread, numThreads);
    #pragma omp parallel for schedule(static,1)
    for (int64_t thread = 0; thread < numThreads; thread++) {
        int64_t threadedStartIdx = startIdx + chunksPerThread * thread;
        int64_t threadedEndIdxExclusive = threadedStartIdx + chunksPerThread;
        if (endIdxExclusive < threadedEndIdxExclusive) threadedEndIdxExclusive = endIdxExclusive;
        // merge for this thread
        outputChunks[thread] = mergeContigChunkz(chunks, threadedStartIdx, threadedEndIdxExclusive, phased, params);
    }
    // finish: merge the per-thread results into one chunk
    ChunkToStitch *stitched = mergeContigChunkz(outputChunks, 0, numThreads, phased, params);
    // update stitching in original chunks
    #pragma omp parallel for schedule(static,1)
    for (int64_t thread = 0; thread < numThreads; thread++) {
        int64_t threadedStartIdx = startIdx + chunksPerThread * thread;
        int64_t threadedEndIdxExclusive = threadedStartIdx + chunksPerThread;
        if (endIdxExclusive < threadedEndIdxExclusive) threadedEndIdxExclusive = endIdxExclusive;
        // potentially update all switching: if a whole slice was flipped during the
        // final merge, flip each original chunk in that slice to match
        if (outputChunks[thread]->wasSwitched) {
            for (int64_t i = threadedStartIdx; i < threadedEndIdxExclusive; i++) {
                chunks[i]->wasSwitched = !chunks[i]->wasSwitched;
            }
        }
        // cleanup
        chunkToStitch_destruct(outputChunks[thread]);
    }
    // cleanup
    free(outputChunks); //these chunks were freed after switching
    return stitched;
}
void outputChunkers_stitch(OutputChunkers *outputChunkers, bool phased, int64_t chunkCount) {
    /*
     * Stitch all chunks into the final output; convenience wrapper that does not
     * track read ids or switched state (see outputChunkers_stitchAndTrackExtraData).
     */
    outputChunkers_stitchAndTrackExtraData(outputChunkers, phased, chunkCount, NULL, NULL, NULL);
}
/*
 * Reads all chunks back from the per-thread temp files, stitches consecutive
 * chunks into one result per contig, and writes those results out. Optionally
 * collects the read IDs assigned to each haplotype (readIdsHap1/readIdsHap2)
 * and the final phase-switch state per chunk ordinal (switchedState).
 */
void outputChunkers_stitchAndTrackExtraData(OutputChunkers *outputChunkers, bool phased, int64_t chunkCount,
        stList *readIdsHap1, stList *readIdsHap2, bool* switchedState) {
    // prep for merge
    assert(chunkCount > 0);
    // Setup to write out the chunks
    outputChunkers_openForStitching(outputChunkers);
    // Create a cache to hold the chunks, ordered by their ordinal
    ChunkToStitch **chunks = st_calloc(chunkCount, sizeof(ChunkToStitch *));
    int64_t *foundChunksPerThread = st_calloc(outputChunkers->noOfOutputChunkers, sizeof(int64_t));
    // get all chunks (each temp-file chunker is drained by one OMP thread)
    # ifdef _OPENMP
    #pragma omp parallel for schedule(dynamic,1)
    # endif
    for (int64_t i = 0; i < outputChunkers->noOfOutputChunkers; i++) {
        foundChunksPerThread[i] = 0;
        ChunkToStitch *chunk = NULL;
        while ((chunk = outputChunker_readChunk(stList_get(outputChunkers->tempFileChunkers, i), phased)) != NULL) {
            if (chunks[chunk->chunkOrdinal] != NULL) {
                // BUG FIX: the ordinal argument for %PRId64 was missing (undefined behavior)
                st_errAbort("Encountered chunk %"PRId64" twice while reading from temp files!", chunk->chunkOrdinal);
            }
            chunks[chunk->chunkOrdinal] = chunk;
            foundChunksPerThread[i]++;
        }
    }
    // sanity check that every ordinal in [0, chunkCount) was produced
    int64_t foundChunks = 0;
    for (int64_t i = 0; i < outputChunkers->noOfOutputChunkers; i++) {
        foundChunks += foundChunksPerThread[i];
    }
    free(foundChunksPerThread);
    if (foundChunks != chunkCount) {
        // Report (up to) the first ten missing ordinals, then abort.
        // BUG FIX: this loop previously never incremented i (infinite loop),
        // scanned noOfOutputChunkers entries instead of chunkCount, and
        // formatted the int64 ordinal with %s.
        stList *missingChunks = stList_construct3(0, free);
        int64_t i = 0;
        while (i < chunkCount && stList_length(missingChunks) < 10) {
            if (chunks[i] == NULL) stList_append(missingChunks, stString_print("%"PRId64, i));
            i++;
        }
        // BUG FIX: append a heap copy — the list destructor frees its elements,
        // so appending the string literal ".." would crash in free()
        if (stList_length(missingChunks) == 10 && i != chunkCount) stList_append(missingChunks, stString_copy(".."));
        st_errAbort("Missing %"PRId64" chunks: %s\n", chunkCount - foundChunks, stString_join2(", ", missingChunks));
    }
    // prep for merging: group runs of consecutive chunks by contig
    int64_t contigStartIdx = 0;
    char *referenceSequenceName = stString_copy(chunks[0]->seqName);
    stList *contigChunkPositions = stList_construct3(0, (void(*)(void*))stIntTuple_destruct);
    stList *contigNames = stList_construct3(0, (void(*)(void*))free);
    // BUG FIX: chunkCount is signed (int64_t); use PRId64, not PRIu64
    st_logCritical("> Merging results from %"PRId64" chunks.\n", chunkCount);
    // find which chunks belong to each contig
    for (int64_t chunkIdx = 1; chunkIdx <= chunkCount; chunkIdx++) {
        // we encountered the last chunk in the contig (end of list or new refSeqName)
        if (chunkIdx == chunkCount || !stString_eq(referenceSequenceName, chunks[chunkIdx]->seqName)) {
            stList_append(contigChunkPositions, stIntTuple_construct2(contigStartIdx, chunkIdx));
            stList_append(contigNames, referenceSequenceName);
            // Reset for next reference sequence
            if (chunkIdx != chunkCount) {
                contigStartIdx = chunkIdx;
                referenceSequenceName = stString_copy(chunks[chunkIdx]->seqName);
            }
        }
        // nothing to do otherwise, just wait until end or new contig
    }
    ChunkToStitch **stitchedContigs = st_calloc(stList_length(contigChunkPositions), sizeof(ChunkToStitch*));
    // in parallel, stitch each contig linearly
    #pragma omp parallel for schedule(static,1)
    for (int64_t contigIdx = 0; contigIdx < stList_length(contigChunkPositions); contigIdx++) {
        // get indices
        stIntTuple *contigChunkPos = stList_get(contigChunkPositions, contigIdx);
        int64_t startIdx = stIntTuple_get(contigChunkPos, 0);
        int64_t endIdxExcl = stIntTuple_get(contigChunkPos, 1);
        // merge and write out
        ChunkToStitch *stitched = mergeContigChunkz(chunks, startIdx, endIdxExcl, phased, outputChunkers->params);
        stitched->seqName = stString_copy(stList_get(contigNames, contigIdx));
        stitched->startOfSequence = true;
        // record final switch state and release the per-chunk data
        for (int64_t i = startIdx; i < endIdxExcl; i++) {
            if (switchedState != NULL) {
                switchedState[i] = chunks[i]->wasSwitched;
            }
            chunkToStitch_destruct(chunks[i]);
        }
        stitchedContigs[contigIdx] = stitched;
    }
    // write everything single-threaded
    for (int64_t contigIdx = 0; contigIdx < stList_length(contigChunkPositions); contigIdx++) {
        ChunkToStitch *stitched = stitchedContigs[contigIdx];
        // write contents
        outputChunkers_writeChunk(outputChunkers, stitched);
        // to write to bam, we need to add all these
        if (readIdsHap1 != NULL && readIdsHap2 != NULL) {
            stHash *chunkReadToProbHap1 = getReadNames(stitched->readsHap1Lines);
            stHash *chunkReadToProbHap2 = getReadNames(stitched->readsHap2Lines);
            stList *chunkReadsHap1 = stHash_getKeys(chunkReadToProbHap1);
            stList *chunkReadsHap2 = stHash_getKeys(chunkReadToProbHap2);
            stList_appendAll(readIdsHap1, chunkReadsHap1);
            stList_appendAll(readIdsHap2, chunkReadsHap2);
            // ownership of the key strings moved to readIdsHap1/2: strip the
            // destructors before destroying the temporary containers
            stHash_setDestructKeys(chunkReadToProbHap1, NULL);
            stHash_setDestructKeys(chunkReadToProbHap2, NULL);
            stList_setDestructor(chunkReadsHap1, NULL);
            stList_setDestructor(chunkReadsHap2, NULL);
            stHash_destruct(chunkReadToProbHap1);
            stHash_destruct(chunkReadToProbHap2);
            stList_destruct(chunkReadsHap1);
            stList_destruct(chunkReadsHap2);
        }
        // Clean up
        chunkToStitch_destruct(stitched);
    }
    // cleanup
    stList_destruct(contigChunkPositions);
    stList_destruct(contigNames);
    free(stitchedContigs);
    free(chunks); //chunks are freed as they're merged
}
/*
 * Tears down all output-chunking state: closes and deletes every temp-file
 * chunker, destroys the hap1/hap2 output chunkers, frees the container, and
 * logs how long the teardown took.
 */
void outputChunkers_destruct(OutputChunkers *outputChunkers) {
    // Close the file streams and delete the temporary files of the temp file chunkers
    for (int64_t idx = 0; idx < stList_length(outputChunkers->tempFileChunkers); idx++) {
        outputChunker_closeAndDeleteFiles(stList_get(outputChunkers->tempFileChunkers, idx));
    }
    time_t startTime = time(NULL);
    // Release the temp file chunkers themselves
    stList_destruct(outputChunkers->tempFileChunkers);
    // Release the final output chunkers (hap2 only exists when phased)
    outputChunker_destruct(outputChunkers->outputChunkerHap1);
    if (outputChunkers->outputChunkerHap2 != NULL) {
        outputChunker_destruct(outputChunkers->outputChunkerHap2);
    }
    free(outputChunkers);
    char *timeDescription = getTimeDescriptorFromSeconds(time(NULL) - startTime);
    st_logInfo(" Closed remaining output chunking infrastructure in %s\n", timeDescription);
    free(timeDescription);
}
core_dgemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgemm.c, normal z -> d, Fri Sep 28 17:38:18 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_gemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
 *    \f[ op( X ) = X^T, \f]
 *    \f[ op( X ) = X^H \mbox{ (same as } X^T \mbox{ for this real-valued variant)}, \f]
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix op( A ) and of the matrix C.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix op( B ) and of the matrix C.
* n >= 0.
*
* @param[in] k
* The number of columns of the matrix op( A ) and the number of rows
* of the matrix op( B ). k >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
* and is m otherwise.
*
* @param[in] lda
* The leading dimension of the array A.
* When transa = PlasmaNoTrans, lda >= max(1,m),
* otherwise, lda >= max(1,k).
*
* @param[in] B
* An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
* and is k otherwise.
*
* @param[in] ldb
* The leading dimension of the array B.
* When transb = PlasmaNoTrans, ldb >= max(1,k),
* otherwise, ldb >= max(1,n).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
* matrix ( alpha*op( A )*op( B ) + beta*C ).
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
/* Declared weak so a tuned or instrumented implementation can override this
 * default definition at link time. */
__attribute__((weak))
void plasma_core_dgemm(plasma_enum_t transa, plasma_enum_t transb,
          int m, int n, int k,
          double alpha, const double *A, int lda,
                        const double *B, int ldb,
          double beta,        double *C, int ldc)
{
    /* Thin wrapper: forward directly to column-major CBLAS dgemm; the
     * PlasmaNoTrans/PlasmaTrans/PlasmaConjTrans enums are cast straight to
     * their CBLAS_TRANSPOSE counterparts. */
    cblas_dgemm(CblasColMajor,
                (CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb,
                m, n, k,
                (alpha), A, lda,
                         B, ldb,
                (beta),  C, ldc);
}
/******************************************************************************/
/*
 * OpenMP-offloaded tiled dgemm: C = alpha*op(A)*op(B) + beta*C.
 * Maps A, B and C to the target device, partitions the product into
 * block_size x block_size tiles, and runs one task per (m_, n_, k_) tile
 * triple inside a parallel/single region on the device.
 *
 * Improvements over the previous revision: removed an unused (and shadowed)
 * `int zbeta;` declaration and a ~65-line block of dead commented-out
 * alternative code.
 */
void plasma_core_omp_dgemm(
    plasma_enum_t transa, plasma_enum_t transb,
    int m, int n, int k,
    double alpha, const double *A, int lda,
                  const double *B, int ldb,
    double beta,        double *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // ak/bk: stored second dimension of A/B, which depends on transposition.
    int ak;
    if (transa == PlasmaNoTrans)
        ak = k;
    else
        ak = m;
    int bk;
    if (transb == PlasmaNoTrans)
        bk = n;
    else
        bk = k;
    if (sequence->status == PlasmaSuccess)
    {
        int transa_ = transa, transb_ = transb;
        int size_A = lda*ak, size_B = ldb*bk, size_C = ldc*n;
        #pragma omp target nowait \
            depend(in:A[0:lda*ak]) \
            depend(in:B[0:ldb*bk]) \
            depend(inout:C[0:ldc*n]) \
            firstprivate(m,n,k,alpha,beta,lda,ak) \
            firstprivate(ldb,bk,ldc,transa_,transb_) \
            map(to:A[0:size_A],B[0:size_B]) \
            map(tofrom:C[0:size_C])
        {
            // NOTE(review): the index arithmetic below uses lda as the stride
            // for A, B and C alike, so this path implicitly assumes
            // lda == ldb == ldc and dimensions divisible by block_size —
            // TODO confirm with callers. Any remainder rows/columns
            // (m % block_size etc.) are silently dropped.
            int block_size = 4, size_matrix = lda;
            int m_new = m/block_size;
            int n_new = n/block_size;
            int k_new = k/block_size;
            #pragma omp parallel
            #pragma omp single
            {
                for (int m_ = 0; m_ < m_new; m_++) {
                    for (int n_ = 0; n_ < n_new; n_++) {
                        for (int k_ = 0; k_ < k_new; k_++) {
                            int lda_ = m_ * block_size + k_ * block_size;
                            int ldb_ = k_ * block_size + n_ * block_size;
                            int ldc_ = m_ * block_size + n_ * block_size;
                            // NOTE(review): these dependences mix a single
                            // element (A[lda_]) with array sections starting at
                            // index 0 (B, C); they serialize more than
                            // necessary and look inconsistent — verify intent.
                            #pragma omp task \
                                depend(in:A[lda_]) \
                                depend(in:B[0:ldb_]) \
                                depend(inout:C[0:ldc_])
                            {
                                double A_new[block_size*block_size], B_new[block_size*block_size], C_new[block_size*block_size];
                                int i_a = m_, j_a = k_;
                                int i_b = k_, j_b = n_;
                                int i_c = m_, j_c = n_;
                                int insert = 0;
                                // Gather the three tiles into contiguous
                                // column-major scratch buffers.
                                for (int l = 0; l < block_size; l++)
                                {
                                    int before_a = i_a*block_size+j_a*lda*block_size + l*size_matrix;
                                    int before_b = i_b*block_size+j_b*lda*block_size + l*size_matrix;
                                    int before_c = i_c*block_size+j_c*lda*block_size + l*size_matrix;
                                    for (int i = 0; i < block_size; i++)
                                    {
                                        A_new[insert] = A[before_a];
                                        B_new[insert] = B[before_b];
                                        C_new[insert] = C[before_c];
                                        insert += 1;
                                        before_a += 1;
                                        before_b += 1;
                                        before_c += 1;
                                    }
                                }
                                // First k-tile applies the caller's beta;
                                // subsequent tiles accumulate (beta = 1).
                                double zbeta = k_ == 0 ? beta : 1.0;
                                plasma_core_dgemm(transa_, transb_,
                                                  block_size, block_size, block_size,
                                                  alpha, (const double *) A_new, block_size,
                                                         (const double *) B_new, block_size,
                                                  zbeta, (double *) C_new, block_size);
                                // Scatter the updated C tile back.
                                insert = 0;
                                for (int l = 0; l < block_size; l++)
                                {
                                    int before_c = i_c*block_size+j_c*lda*block_size + l*size_matrix;
                                    for (int i = 0; i < block_size; i++)
                                    {
                                        C[before_c] = C_new[insert];
                                        insert += 1;
                                        before_c += 1;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
|
omp_threadprivate.c | /*
*
* threadprivate is tested in 2 ways
* 1. The global variable declared as threadprivate should have local copy
* for each thread. Otherwise race condition for wrong result
* 2. if the value of local copy is retained for two adjacent parallel region
*/
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
static int sum0 = 0;
#pragma omp threadprivate(sum0)
static int myvalue = 0;
#pragma omp threadprivate(myvalue)
/*
 * Positive test for "#pragma omp threadprivate":
 *   1. sum0 must be a per-thread copy — if it were shared, the unsynchronized
 *      updates in the worksharing loop would race and the reduction into
 *      `sum` would be wrong.
 *   2. myvalue must retain its per-thread value across two adjacent parallel
 *      regions (dynamic threads are disabled so the team is stable).
 * Returns nonzero on success.
 */
int
check_omp_threadprivate (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  int i;
  int iter;
  int *data;                    /* one slot per thread, filled in region 1 */
  int size;                     /* number of threads in the team */
  int failed = 0;
  int my_random;
  omp_set_dynamic (0);          /* keep the thread count stable */
#pragma omp parallel
  {
    sum0 = 0;                   /* reset this thread's private copy */
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++)
      {
	sum0 = sum0 + i;
      }				/*end of for */
#pragma omp critical
    {
      sum = sum + sum0;        /* combine the per-thread partial sums */
    }				/*end of critical */
  }				/* end of parallel */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  if (known_sum != sum)
    {
      fprintf (logFile, " known_sum = %d , sum = %d \n", known_sum, sum);
    }
  /* the next parallel region is just used to get the number of threads */
  omp_set_dynamic (0);
#pragma omp parallel
  {
#pragma omp master
    {
      size = omp_get_num_threads ();
      data = (int *) malloc (size * sizeof (int));
    }
  }				/* end parallel */
  srand (45);
  for (iter = 0; iter < 100; iter++)
    {
      my_random = rand ();	/* random number generator is called inside serial region */
      /* the first parallel region initializes myvalue and the array with my_random+rank */
#pragma omp parallel
      {
	int rank;
	rank = omp_get_thread_num ();
	myvalue = data[rank] = my_random + rank;
      }
      /* the second parallel region verifies that the value of "myvalue" is retained */
#pragma omp parallel reduction(+:failed)
      {
	int rank;
	rank = omp_get_thread_num ();
	failed = failed + (myvalue != data[rank]);
	if (myvalue != data[rank])
	  {
	    fprintf (logFile, " myvalue = %d, data[rank]= %d\n", myvalue,data[rank]);
	  }
      }
    }
  free (data);
  /* A known_sum mismatch above is only logged, but it still fails the test
   * through this return expression. */
  return (known_sum == sum) && !failed;
}				/* end of check_threadprivate */
static int crosssum0 = 0;
/*#pragma omp threadprivate(crosssum0)*/
static int crossmyvalue = 0;
/*#pragma omp threadprivate(crossmyvalue)*/
/*
 * Crosscheck counterpart of check_omp_threadprivate: same structure, but the
 * threadprivate pragmas for crosssum0/crossmyvalue are deliberately commented
 * out (see the declarations above), so both variables are shared and the test
 * is expected to fail on a multi-threaded run — this validates that the
 * positive test can actually detect a missing threadprivate.
 */
int
crosscheck_omp_threadprivate (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  int i;
  int iter;
  int *data;
  int size;
  int failed = 0;
  int my_random;
  omp_set_dynamic (0);
#pragma omp parallel
  {
    crosssum0 = 0;              /* shared here: concurrent resets/updates race */
#pragma omp for
    /* NOTE(review): the bound is `< LOOPCOUNT` while known_sum below uses the
     * inclusive formula — presumably intentional for a crosscheck that must
     * fail; confirm against the positive test which uses `<=`. */
    for (i = 1; i < LOOPCOUNT; i++)
      {
	crosssum0 = crosssum0 + i;
      }				/*end of for */
#pragma omp critical
    {
      sum = sum + crosssum0;
    }				/*end of critical */
  }				/* end of parallel */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
#pragma omp master
    {
      size = omp_get_num_threads ();
      data = (int *) malloc (size * sizeof (int));
    }
  }
  srand (45);
  for (iter = 0; iter < 100; iter++)
    {
      my_random = rand ();
#pragma omp parallel
      {
	int rank;
	rank = omp_get_thread_num ();
	crossmyvalue = data[rank] = my_random + rank;   /* shared: last writer wins */
      }
#pragma omp parallel reduction(+:failed)
      {
	int rank;
	rank = omp_get_thread_num ();
	failed = failed + (crossmyvalue != data[rank]);
      }
    }
  free (data);
  return (known_sum == sum) && !failed;
}				/* end of crosscheck_threadprivate */
|
main.c | #include "main.h"
#include "helpers/helpers.h"
#define TOTAL_CORPOS 10
CORPO *generate_mock_bodies(int body_count){
CORPO *bodies = (CORPO *)malloc(sizeof(CORPO)*body_count);
for(int i = 0; i < body_count; i++){
bodies[i].p = get_vector_cartesian(i, i, i);
bodies[i].m = i;
bodies[i].f = get_vector_cartesian(0, 0, 0);
bodies[i].v = get_vector_cartesian(0, 0, 0);
//print_body(&bodies[i]);
}
return bodies;
}
/*
 * N-body simulation driver: reads the body list and time parameters from the
 * input file, integrates total_time/delta_time explicit Euler steps with
 * OpenMP (4 threads over the bodies), and writes each body's position per
 * step to OUTPUT_FILE.
 */
int main(int argc, char ** argv){
    int total_time, delta_time, body_count;
    clock_t t1, t2;
    body_count = get_body_count();
    CORPO *P = read_from_file(&total_time, &delta_time, body_count);
    FILE *fp = fopen(OUTPUT_FILE, "w");
    if (fp == NULL){
        printf("\n\nOcorreu um erro ao abrir o ficheiro!\n");
        return 1;
    }
    printf("total = %d. delta = %d\n", total_time, delta_time);
    printf("body count=%i\n\n\n", body_count);
    t1 = clock();
    // NOTE: called outside any parallel region, so this always reports one
    // thread / thread 0; kept for output compatibility.
    printf("A comecar a simulacao com %i processadores. Hello P%i.\n", omp_get_num_threads(), omp_get_thread_num());
    for(int k = 0; k < total_time; k += delta_time){
        printf("Iteracao temporal %i/%i (k = %i).\n", k/delta_time, total_time/delta_time, k/delta_time);
        #pragma omp parallel num_threads(4)
        {
            #pragma omp for
            for (int i=0; i<body_count; i++){
                write_position_to_file(P[i].p, fp);
                // BUG FIX: the force accumulator used to be a single variable
                // declared once before the time loop, so (a) forces accumulated
                // across bodies and time steps instead of being reset, and
                // (b) it was shared between threads — a data race. It is now
                // loop-local: reset per body, private per thread (likewise
                // v_anterior/p_anterior below).
                VECTOR F = get_vector_cartesian(0, 0, 0);
                for(int j=0; j<body_count; j++ ){
                    if( j != i ){
                        F = sum_vector(F, forca(P[i], P[j]));
                    }
                }
                P[i].f = F;
                P[i].a = aceleracao(P[i].f, P[i].m);
                VECTOR v_anterior = P[i].v;
                P[i].v = velocidade(v_anterior, P[i].a, delta_time);
                VECTOR p_anterior = P[i].p;
                P[i].p = posicao(p_anterior, P[i].v, delta_time);
                // NOTE(review): positions are updated in place while other
                // threads may still be reading P[j] for their force sums; a
                // two-phase (compute-then-integrate) step would make the
                // result order-independent.
                // After the last body of this time step, emit the iteration
                // separator expected by the output format.
                if(i == body_count - 1){
                    fprintf(fp, "\n");
                }
            }
        }
    }
    fclose(fp);
    t2 = clock();
    printf("Simulacao gerada em %6.3f secs.\n",(((double)(t2-t1))/CLOCKS_PER_SEC));
    return 0;
}
starspot.c | /******************************************************************************/
/*** file: starspot.c ***/
/*** first version: 1.0 2006/08/29 X.Bonfils ***/
/*** Centro de Astronomia e Astrofisica da Universidade de Lisboa ***/
/*** this version: 2.0 2014/12/05 X. Dumusque ***/
/*** Harvard-Smithsonian Center for Astrophysics ***/
/******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stddef.h>
#include "starspot.h"
#include <math.h>
#include "gsl/gsl_errno.h"
#include "gsl/gsl_math.h"
#include "gsl/gsl_spline.h"
#include "gsl/gsl_min.h"
#include "gsl/gsl_multimin.h"
#include "gsl/gsl_multifit.h"
#include "gsl/gsl_multifit_nlin.h"
FILE *file; /* Declare the file pointer */
struct gauss_params
{
	/*
	 * Data passed to the GSL least-squares Gaussian fit callbacks:
	 *   x, y, err : abscissae, ordinates and 1-sigma errors of the n points
	 *   fit       : output buffer receiving the model evaluated at x
	 *   n         : number of data points
	 *   m         : not referenced by the callbacks in this file -- TODO confirm
	 */
	double *x, *y, *err, *fit;
	int n, m;
};
int gauss_f(const gsl_vector *v, void *p, gsl_vector *f)
{
	/*
	 * GSL residual callback: evaluates the Gaussian model
	 *   c + k * exp(-(x - x0)^2 / (2*sigma^2)),  sigma = fwhm / (2*sqrt(2 ln 2)),
	 * stores it in params->fit, and writes the error-weighted residuals
	 * (fit - y) / err into f.
	 */
	struct gauss_params *gp = (struct gauss_params *) p;
	const double c    = gsl_vector_get(v, 0);
	const double k    = gsl_vector_get(v, 1);
	const double x0   = gsl_vector_get(v, 2);
	const double fwhm = gsl_vector_get(v, 3);
	const double sigma = fwhm/2/sqrt(2*log(2));
	for (int i = 0; i < gp->n; i++) {
		const double dx = gp->x[i] - x0;
		gp->fit[i] = c + k * exp(-dx*dx/2/sigma/sigma);
		gsl_vector_set (f, i, (gp->fit[i] - gp->y[i]) / gp->err[i]);
	}
	return GSL_SUCCESS;
}
int gauss_df(const gsl_vector *v, void *p, gsl_matrix *J)
{
	/*
	 * GSL Jacobian callback for the Gaussian model
	 *   f(x) = c + k*exp(-(x-x0)^2/(2*sigma^2)), sigma = fwhm/(2*sqrt(2 ln 2)).
	 * Columns follow the parameter vector (c, k, x0, fwhm); each row is
	 * scaled by 1/err[i] because the residuals are error-weighted.
	 */
	double k = gsl_vector_get(v, 1);
	double x0 = gsl_vector_get(v, 2);
	double fwhm = gsl_vector_get(v, 3);
	double *x = ((struct gauss_params *) p)->x;
	double *err = ((struct gauss_params *) p)->err;
	int n = ((struct gauss_params *) p)->n;
	int i;
	double sigma = fwhm/2/sqrt(2*log(2));
	for (i = 0; i < n; i++) {
		double e = exp(-(x[i]-x0)*(x[i]-x0)/2/sigma/sigma);
		gsl_matrix_set (J, i, 0, 1/err[i]);   /* df/dc */
		gsl_matrix_set (J, i, 1, e/err[i]);   /* df/dk */
		gsl_matrix_set (J, i, 2, k/err[i]*e*(x[i]-x0)/sigma/sigma);   /* df/dx0 */
		/* df/dfwhm via the chain rule: df/dsigma * dsigma/dfwhm,
		 * with dsigma/dfwhm = 1/(2*sqrt(2 ln 2)) */
		gsl_matrix_set (J, i, 3, k/err[i]*e*(x[i]-x0)*(x[i]-x0)/sigma/sigma/sigma/2/sqrt(2*log(2)));
	}
	return GSL_SUCCESS;
}
int gauss_fdf(const gsl_vector *v, void *p, gsl_vector *f, gsl_matrix *J)
{
	/*
	 * Combined residual + Jacobian callback required by
	 * gsl_multifit_function_fdf: delegates to gauss_f and gauss_df.
	 */
	gauss_f(v, p, f);
	gauss_df(v, p, J);
	return GSL_SUCCESS;
}
void gauss_bis(double *vrad_ccf, double *ccf, double *err, int n1, double *mod, double *para, double *depth, double *bis, int len_depth)
{
	/*
	 * Fit a Gaussian c + k*exp(-(x-x0)^2/(2 sigma^2)) to the CCF with GSL's
	 * Levenberg-Marquardt solver, then compute the CCF bisector at the
	 * requested normalized depths and the bisector span.
	 * Outputs:
	 *   mod  - best-fit model sampled at vrad_ccf
	 *   para - {c, k, vrad, fwhm, sig_c, sig_k, sig_vrad, sig_fwhm, span}
	 *   bis  - bisector velocity at each of the len_depth depths
	 */
	struct gauss_params p;
	int i, j = 0, status, npar = 4;
	double c=0, k=0, vrad=0, fwhm=0;
	double sig_c=0, sig_k=0, sig_vrad=0, sig_fwhm=0;
	double *x;
	gsl_vector *v = gsl_vector_alloc (npar);
	gsl_matrix *covar = gsl_matrix_alloc (npar, npar);
	const gsl_multifit_fdfsolver_type *T;
	gsl_multifit_fdfsolver *s;
	gsl_multifit_function_fdf F;
	x = vrad_ccf;
	p.y = ccf;
	p.err = err;
	p.n = n1;
	p.x = (double *) malloc(n1*sizeof(double));
	p.fit = mod;
	/* work in velocities relative to the first sample */
	for (i = 0; i < p.n; i++)
		p.x[i] = x[i]-x[0];
	/* initial guesses: continuum from the endpoints, fwhm as 1/5 of the
	 * span, amplitude/centre from the most deviant point */
	c = (p.y[0]+p.y[p.n-1])/2;
	fwhm = (p.x[p.n-1]-p.x[0])/5.;
	k = 0.; vrad = p.x[p.n/2];
	for (i = 0; i < p.n; i++)
		/* BUG FIX: abs() truncated the doubles to int; use fabs() */
		if (fabs(p.y[i]-c) > fabs(k)) { k = p.y[i]-c; vrad = p.x[i]; }
	F.f = &gauss_f;
	F.df = &gauss_df;
	F.fdf = &gauss_fdf;
	F.n = p.n;
	F.p = npar;
	F.params = &p;
	T = gsl_multifit_fdfsolver_lmsder;
	s = gsl_multifit_fdfsolver_alloc (T, p.n, npar);
	gsl_vector_set(v, 0, c);
	gsl_vector_set(v, 1, k);
	gsl_vector_set(v, 2, vrad);
	gsl_vector_set(v, 3, fwhm);
	gsl_multifit_fdfsolver_set (s, &F, v);
	do {
		j++;
		status = gsl_multifit_fdfsolver_iterate (s);
		status = gsl_multifit_test_delta(s->dx, s->x, 0.0, 1e-5);
	}
	while (status == GSL_CONTINUE && j < 10000);
	gsl_matrix *J = gsl_matrix_alloc(p.n, npar);
	gsl_multifit_fdfsolver_jac(s, J);
	gsl_multifit_covar (J, 0.0, covar);
	gsl_matrix_free(J);   /* BUG FIX: J was leaked */
	c = gsl_vector_get(s->x, 0); sig_c = sqrt(gsl_matrix_get(covar,0,0));
	k = gsl_vector_get(s->x, 1); sig_k = sqrt(gsl_matrix_get(covar,1,1));
	vrad = gsl_vector_get(s->x, 2); sig_vrad = sqrt(gsl_matrix_get(covar,2,2));
	fwhm = gsl_vector_get(s->x, 3); sig_fwhm = sqrt(gsl_matrix_get(covar,3,3));
	/* Bisector part: locally invert the fitted Gaussian to express velocity
	 * as a quadratic in normalized depth on each CCF interval. */
	double sigma = fwhm/2./pow(2.*log(2.),.5), vr, v0=vrad+x[0];
	double dCCFdRV, d2CCFdRV2, d2RVdCCF2;
	double *norm_CCF, *p0, *p1, *p2;
	norm_CCF = (double *)malloc(sizeof(double)*p.n);
	p0 = (double *)malloc(sizeof(double)*p.n);
	p1 = (double *)malloc(sizeof(double)*p.n);
	p2 = (double *)malloc(sizeof(double)*p.n);
	/* normalized line depth: 0 at continuum, 1 at line core */
	for (i=0; i<p.n; i++) norm_CCF[i] = -c/k*(1.-p.y[i]/c);
	for (i=0; i<p.n-1; i++) {
		vr = (x[i]+x[i+1])/2.;
		dCCFdRV = -(vr-v0)/pow(sigma,2)*exp(-pow((vr-v0),2)/2./pow(sigma,2));
		d2CCFdRV2 = (pow((vr-v0),2)/pow(sigma,2)-1)/pow(sigma,2)*exp(-pow((vr-v0),2)/2/pow(sigma,2));
		d2RVdCCF2 = -d2CCFdRV2/pow(dCCFdRV,3);
		p2[i] = d2RVdCCF2/2;
		p1[i] = (x[i+1]-x[i]-p2[i]*(pow(norm_CCF[i+1],2)-pow(norm_CCF[i],2)))/(norm_CCF[i+1]-norm_CCF[i]);
		p0[i] = x[i]-p1[i]*norm_CCF[i]-p2[i]*pow(norm_CCF[i],2);
	}
	/* average the blue- and red-wing quadratics at each requested depth */
	int ind_max = 0, i_b, i_r;
	for (i=0; i<p.n; i++) if (norm_CCF[i]>norm_CCF[ind_max]) ind_max=i;
	for (i=0; i<len_depth; i++) {
		i_b = ind_max; i_r = ind_max;
		while ((norm_CCF[i_b] > depth[i]) && (i_b > 1)) i_b--;
		while ((norm_CCF[i_r+1] > depth[i]) && (i_r < (p.n-2))) i_r++;
		bis[i] = (p0[i_b]+p0[i_r]) + (p1[i_b]+p1[i_r])*depth[i] + (p2[i_b]+p2[i_r])*pow(depth[i],2);
		bis[i] /= 2.;
	}
	/* bisector span: mean bisector near the top (depth 0.1-0.4) minus the
	 * mean near the bottom (depth 0.6-0.9) */
	int n2=0, n3=0;
	double RV_top=0., RV_bottom=0., span=0.;
	for (i=0; i<len_depth; i++) {
		if ((depth[i]>=0.1) && (depth[i] <= 0.4)) {
			n2++;
			RV_top += bis[i];}
		if ((depth[i]>=0.6) && (depth[i] <= 0.9)) {
			n3++;
			RV_bottom += bis[i];}
	}
	/* BUG FIX: guard against division by zero when no depths fall in a window */
	if (n2 > 0) RV_top /= n2;
	if (n3 > 0) RV_bottom /= n3;
	span = RV_top-RV_bottom;
	vrad = vrad+vrad_ccf[0];   /* back to absolute velocity */
	para[0] = c;
	para[1] = k;
	para[2] = vrad;
	para[3] = fwhm;
	para[4] = sig_c;
	para[5] = sig_k;
	para[6] = sig_vrad;
	para[7] = sig_fwhm;
	para[8] = span;
	gsl_vector_free(v);
	gsl_matrix_free(covar);
	gsl_multifit_fdfsolver_free(s);
	free(p.x); free(norm_CCF); free(p0); free(p1); free(p2);
}
void gauss_v0(double *vrad_ccf, double *ccf, double *err,
	      int n1, double *mod, double *para)
{
	/*
	 * Fit a Gaussian to the CCF (same model and solver setup as gauss_bis)
	 * and return only the fitted line-centre radial velocity in para[0];
	 * the best-fit model sampled at vrad_ccf is written into mod.
	 */
	struct gauss_params p;
	int i, j = 0, status, npar = 4;
	double c=0, k=0, vrad=0, fwhm=0;
	double *x;
	gsl_vector *v = gsl_vector_alloc (npar);
	gsl_matrix *covar = gsl_matrix_alloc (npar, npar);
	const gsl_multifit_fdfsolver_type *T;
	gsl_multifit_fdfsolver *s;
	gsl_multifit_function_fdf F;
	x = vrad_ccf;
	p.y = ccf;
	p.err = err;
	p.n = n1;
	p.x = (double *) malloc(n1*sizeof(double));
	p.fit = mod;
	/* work in velocities relative to the first sample */
	for (i = 0; i < p.n; i++)
		p.x[i] = x[i]-x[0];
	/* initial guesses: continuum, width, amplitude/centre from the most
	 * deviant point */
	c = (p.y[0]+p.y[p.n-1])/2;
	fwhm = (p.x[p.n-1]-p.x[0])/5.;
	k = 0.; vrad = p.x[p.n/2];
	for (i = 0; i < p.n; i++)
		/* BUG FIX: abs() truncated the doubles to int; use fabs() */
		if (fabs(p.y[i]-c) > fabs(k)) { k = p.y[i]-c; vrad = p.x[i]; }
	F.f = &gauss_f;
	F.df = &gauss_df;
	F.fdf = &gauss_fdf;
	F.n = p.n;
	F.p = npar;
	F.params = &p;
	T = gsl_multifit_fdfsolver_lmsder;
	s = gsl_multifit_fdfsolver_alloc (T, p.n, npar);
	gsl_vector_set(v, 0, c);
	gsl_vector_set(v, 1, k);
	gsl_vector_set(v, 2, vrad);
	gsl_vector_set(v, 3, fwhm);
	gsl_multifit_fdfsolver_set (s, &F, v);
	do {
		j++;
		status = gsl_multifit_fdfsolver_iterate (s);
		status = gsl_multifit_test_delta(s->dx, s->x, 0.0, 1e-5);
	}
	while (status == GSL_CONTINUE && j < 10000);
	gsl_matrix *J = gsl_matrix_alloc(p.n, npar);
	gsl_multifit_fdfsolver_jac(s, J);
	gsl_multifit_covar (J, 0.0, covar);
	gsl_matrix_free(J);   /* BUG FIX: J was leaked */
	vrad = gsl_vector_get(s->x, 2);
	vrad = vrad+vrad_ccf[0];   /* back to absolute velocity */
	para[0] = vrad;
	gsl_vector_free(v);
	gsl_matrix_free(covar);
	gsl_multifit_fdfsolver_free(s);
	free(p.x);
}
double rndup(double n,int nb_decimal)
{
/*
* Function to round up a double type at nb_decimal
*/
double t;
t=n*pow(10,nb_decimal) - floor(n*pow(10,nb_decimal));
if (t>=0.5)
{
n*=pow(10,nb_decimal);//where n is the multi-decimal double
n = ceil(n);
n/=pow(10,nb_decimal);
}
else
{
n*=pow(10,nb_decimal);//where n is the multi-decimal double
n = floor(n);
n/=pow(10,nb_decimal);
}
return n;
}
double Delta_lambda(double line_width, double lambda_line0)
{
/*
* Calculates the broadening of a spectral line (or shift) taking into account
* the velocity and the wavelength. line_width in [m/s] and lambda_line0 in [Angstrom]
*/
double c=299792458.;
double line_spread=0.;
double beta;
// relativist case
beta = line_width/c;
line_spread = -1 * lambda_line0 * (1 - sqrt((1+beta)/(1-beta)));
return line_spread;
}
void shifting_CCF(double *lambda, double *CCF, double *CCF_blueshifted, double v_shift,int n1)
{
	/*
	 * Shift the CCF by velocity v_shift using a first-order (linear)
	 * correction: out[i] = CCF[i] - v_shift * dCCF/dv, where the derivative
	 * is a forward difference (the last point's derivative is 0). For a
	 * negative shift the derivative of the previous interval is used.
	 */
	int idx;
	double *slope = (double *) malloc(n1 * sizeof(double));
	for (idx = 0; idx < n1; idx++)
	{
		slope[idx] = 0.0;
		CCF_blueshifted[idx] = 0.0;
	}
	/* forward-difference derivative; a zero-width interval reuses the
	 * previous slope (or 0 for the first point) */
	if (lambda[1] - lambda[0] != 0)
		slope[0] = (CCF[1] - CCF[0]) / (lambda[1] - lambda[0]);
	for (idx = 1; idx < n1 - 1; idx++)
	{
		double dx = lambda[idx + 1] - lambda[idx];
		slope[idx] = (dx == 0) ? slope[idx - 1] : (CCF[idx + 1] - CCF[idx]) / dx;
	}
	slope[n1 - 1] = 0.;
	/* linear interpolation: dy = slope * dx */
	for (idx = 0; idx < n1; idx++)
	{
		double d = (v_shift >= 0 || idx == 0) ? slope[idx] : slope[idx - 1];
		CCF_blueshifted[idx] = CCF[idx] - v_shift * d;
	}
	free(slope);
}
// Calculates the Planck function
double loi_Planck(double lambda0, int Temp)
{
double c = 299792458.; // speed of light m/s
double h = 6.62606896e-34; // Planck constant
double k_b = 1.380e-23; // Boltzmann constant
return 2*h*pow(c,2)*1./pow(lambda0,5)*1./(exp((h*c)/(lambda0*k_b*Temp))-1);
}
void itot(double v, double i, double limba1, double limba2, double modif_bis_quad, double modif_bis_lin, double modif_bis_cte, int grid,
          double *vrad_ccf, double *intensity_ccf,
          double v_interval, int n_v, int n, double *f_star, double *sum_star)
{
	/*
	 * Disc-integrated flux/CCF computation (accumulating into f_star and
	 * sum_star). The entire implementation was commented out in the
	 * original source, so this function is intentionally a no-op: it reads
	 * none of its inputs and leaves f_star and sum_star untouched. Consult
	 * the project history for the disabled algorithm.
	 */
	(void)v; (void)i; (void)limba1; (void)limba2;
	(void)modif_bis_quad; (void)modif_bis_lin; (void)modif_bis_cte;
	(void)grid; (void)vrad_ccf; (void)intensity_ccf;
	(void)v_interval; (void)n_v; (void)n; (void)f_star; (void)sum_star;
}
void starmap(double v, double i, double limba1, double limba2, int grid,
             double **Fmap, double **Vmap)
{
    /*
     * Calculates the flux F and velocity V maps of the star
     * taking into account the rotational velocity,
     * the stellar inclination and the limb-darkening.
     * v      [km/s]      stellar rotation
     * i      [degree]    inclination of the rotational axis / sky plane
     * limba1 [0-1.]      linear coefficient of the quadratic limb-darkening law
     * limba2 [0-1.]      quadratic coefficient of the quadratic limb-darkening law
     * grid               number of cells per side of the square scanning grid
     * Fmap   [arb. unit] Flux map (2D array, grid x grid)
     * Vmap   [km/s]      Velocity map (2D array, grid x grid)
     */
    double r_cos, limb, y, z, delta_grid, delta_v;
    int iy, iz;
    v = v*sin(i/180.*pi);   // Projected rotational velocity (v sin i)
    delta_grid = 2./grid;   // The stellar disc goes from -1 to 1, therefore 2
    delta_v = 2.*v/grid;    // Velocity step along y; the disc goes from -1 to 1
    for (iy=0; iy<grid; iy++)       // y-axis scan
    {
        y = -1 + iy*delta_grid;
        for (iz=0; iz<grid; iz++)   // z-axis scan
        {
            z = -1 + iz*delta_grid;
            if ((y*y+z*z)<1)    // If the cell lies on the visible stellar disc
            {
                // limb-darkening: r_cos is mu = cos(theta),
                // 1 at disc center and 0 on the limb
                r_cos = sqrt(1.-(y*y+z*z));
                limb = 1. - limba1*(1-r_cos) - limba2*(1-r_cos)*(1-r_cos);
                Fmap[iy][iz]= limb;
                Vmap[iy][iz]= -v+iy*delta_v;    // solid-body rotation: V = v*y
            }
            else
            {
                // Outside the disc: no flux, sentinel velocity
                Fmap[iy][iz]=0;
                Vmap[iy][iz]=-9999;
            }
        }
    }
}
void spot_init(double s, double longitude, double latitude, double inclination,
               int nrho, double **xyz)
{
    /* Computes the real (x,y,z) positions of the circumference of a circular
     * active region, initialized at the disc center and then rotated to its
     * actual position (the star rotates around the z axis).
     *   s           [spot radius, in stellar radii]
     *   longitude   [degree]
     *   latitude    [degree]
     *   inclination [degree] i=0 -> pole-on (North), i=90 -> equator-on
     *   nrho        spot circumference resolution (must be >= 2, see rho_step)
     *   xyz         output: nrho x 3 array, real position of the spot
     * Internally, xyz2 holds the position of the spot at the disc center. */
    double rho_step, x_circ, **xyz2;
    int j;
    /* Conversion [deg] -> [rad] */
    longitude *= pi/180.;
    latitude *= pi/180.;
    inclination *= pi/180.;
    // In this initial disc-center position, we calculate the coordinates
    // (x,y,z) of the points of the active region's circumference
    xyz2 = (double **)malloc(sizeof(double *)*nrho);
    for (j=0; j<nrho; j++) xyz2[j] = (double *)malloc(sizeof(double)*3);
    // A circular active region has a resolution given by nrho, which implies
    // a point on the circumference every 2*pi/(nrho-1)
    // (-1 because there are (nrho-1) intervals over [-pi, pi])
    rho_step = 2.*pi/(nrho-1);
    // x=sqrt(r^2-s^2), where r is the radius (r=1, so r^2=1).
    // The active region is on the surface, so very close to x=1, but with
    // the curvature of the sphere its circumference sits at x=sqrt(1-s^2).
    // Loop-invariant: computed once instead of once per point.
    x_circ = sqrt(1.-s*s);
    #pragma omp parallel for if(nrho>500)
    for (j=0; j<nrho; j++) {
        double rho = -pi + j*rho_step;  // rho goes from -pi to pi
        xyz2[j][0] = x_circ;
        xyz2[j][1] = s*cos(rho);    // projection of the circumference on the y axis
        xyz2[j][2] = s*sin(rho);    // projection of the circumference on the z axis
    }
    // To account for the real projection of the spot, we rotate the star and
    // look how the coordinates of the circumference change position according
    // to latitude, longitude and inclination. It consists of three rotations.
    //
    // Conventions :
    //  - when inclination=0 the star rotates around z axis
    //  - line of sight is along x-axis
    //  - sky plane = yz-plane
    //
    // Be Rx(alpha), Ry(beta), Rz(gamma) the rotations around the x, y and z
    // axis with angles alpha, beta and gamma (counter-clockwise direction
    // when looking toward the origin).
    //
    // The rotations to apply are:
    //      Ry(inclination) x Rz(longitude) x Ry(latitude) x X(x,y,z)
    //
    //         |  cos(b) 0 sin(b) |          | cos(g) -sin(g) 0 |
    // Ry(b) = |  0      1 0      |  Rz(g) = | sin(g)  cos(g) 0 |
    //         | -sin(b) 0 cos(b) |          | 0       0      1 |
    double b = -latitude;
    double g = longitude;
    double b2 = pi/2.-inclination;
    double R[3][3] = {{cos(b2)*cos(g)*cos(b)-sin(b)*sin(b2), -sin(g)*cos(b2), cos(b2)*cos(g)*sin(b)+sin(b2)*cos(b)},
                      {sin(g)*cos(b), cos(g), sin(g)*sin(b)},
                      {-sin(b2)*cos(g)*cos(b)-cos(b2)*sin(b), sin(b2)*sin(g), -sin(b2)*cos(g)*sin(b)+cos(b2)*cos(b)}};
    // Real xyz position of the active region after rotating the star to put
    // the initially-equatorial region at the correct longitude and latitude,
    // taking into account the stellar inclination
    #pragma omp parallel for if(nrho>500)
    for (j=0; j<nrho; j++) {
        xyz[j][0] = R[0][0]*xyz2[j][0] + R[0][1]*xyz2[j][1] + R[0][2]*xyz2[j][2];
        xyz[j][1] = R[1][0]*xyz2[j][0] + R[1][1]*xyz2[j][1] + R[1][2]*xyz2[j][2];
        xyz[j][2] = R[2][0]*xyz2[j][0] + R[2][1]*xyz2[j][1] + R[2][2]*xyz2[j][2];
    }
    // Free memory (the intermediate rho array of the original was removed:
    // each angle is now computed on the fly, saving one allocation)
    for (j=0; j<nrho; j++) free(xyz2[j]);
    free(xyz2);
}
void spot_phase(double **xyz, double inclination, int nrho, double phase, double **xyz2)
{
    /* Rotates the spot circumference xyz (nrho x 3) around the inclined
     * stellar spin axis by the given rotational phase, writing into xyz2.
     *   inclination [degree]
     *   phase       [0-1]
     */
    int i;
    double psi = -phase*(2*pi);         // phase in [0,1] -> psi in [-2pi, 0] rad
    inclination = inclination*pi/180.;  // in radian
    // Projection of the rotation axis on the xyz coordinates
    double axe[3] = {cos(inclination),0,sin(inclination)};
    // Hoist the trigonometric terms: the original evaluated cos(psi)/sin(psi)
    // 18 times in the matrix initializer; the values (and results) are identical.
    double c = cos(psi);
    double sn = sin(psi);
    double mc = 1-c;    // (1 - cos(psi))
    // Rotation around the axis (axe[0],axe[1],axe[2]) by an angle psi.
    // There is some sign difference with respect to e.g. Wikipedia because
    // psi is defined negative here: cos(-psi)=cos(psi) -> no sign change,
    // but sin(-psi)=-sin(psi) -> sign change.
    double R[3][3] = {{mc*axe[0]*axe[0] + c,     mc*axe[0]*axe[1] + sn*axe[2], mc*axe[0]*axe[2] - sn*axe[1]},
                      {mc*axe[1]*axe[0] - sn*axe[2], mc*axe[1]*axe[1] + c,     mc*axe[1]*axe[2] + sn*axe[0]},
                      {mc*axe[2]*axe[0] + sn*axe[1], mc*axe[2]*axe[1] - sn*axe[0], mc*axe[2]*axe[2] + c}};
    #pragma omp parallel for if(nrho>500)
    for (i=0; i<nrho; i++) // xyz position of the circumference at phase psi
    {
        xyz2[i][0] = R[0][0]*xyz[i][0] + R[0][1]*xyz[i][1] + R[0][2]*xyz[i][2];
        xyz2[i][1] = R[1][0]*xyz[i][0] + R[1][1]*xyz[i][1] + R[1][2]*xyz[i][2];
        xyz2[i][2] = R[2][0]*xyz[i][0] + R[2][1]*xyz[i][1] + R[2][2]*xyz[i][2];
    }
}
int spot_area(double **xlylzl, int nrho, int grid, int *iminy, int *iminz,
int *imaxy, int *imaxz)
{
// Determine a smaller yz-area of the stellar disk, where the active region is
// The different cases are :
// - the active region is completely visible (all x of the circumference >=0)
// - the active region is completely invisible (all x of the circumference <0)
// - the active region is on the disk edge and partially visible only
int j, visible=0;
double grid_step = 2./grid; //The stellar disc goes from -1 to 1, therefore 2
double miny=1, minz=1, maxy=-1, maxz=-1; // init to 'opposite'-extreme values
int counton=0, countoff=0; // count how many points of the circumference are
// visible and how many are invisible
for (j=0; j<nrho; j++) //scan each point of the circumference
if (xlylzl[j][0]>=0) { // if x>=0
counton += 1;
// select the extreme points of the circumference
if (xlylzl[j][1]<miny) miny = xlylzl[j][1];
if (xlylzl[j][2]<minz) minz = xlylzl[j][2];
if (xlylzl[j][1]>maxy) maxy = xlylzl[j][1];
if (xlylzl[j][2]>maxz) maxz = xlylzl[j][2];
}
else countoff = 1;
if ((counton>0)&&(countoff>0)) { // There are both visible and invisible points
// --> active region is on the edge
// In this situation there are cases where the yz-area define above is
// actually smaller than the real area of the active region on the stellar disk.
// The minima/maxima are over/under-estimated if the active region is on one of the
// axis (y or z). Because if on the y axis, the minimum (or maximum) won t be on the circumference of the active region. Same for z axis
if (miny*maxy<0) { //active region on the z-axis because one point is on the positive side of z, and the other on the negative side of z
if (minz<0) minz=-1; //active region on the bottom-z axis (z<0)
else maxz=1;} //active region on the top-z axis (z>=0)
if (minz*maxz<0) { //active region on the y-axis because one point is on the positive side of y, and the other on the negative side of z
if (miny<0) miny=-1; //active region on the left hand-y axis (y<0)
else maxy=1;} //active region on the right hand-y axis (y>=0)
};
if (counton==0) visible = 0;
else visible = 1;
// Indices of miny, minz,... on the grid
*iminy = floor((1.+miny)/grid_step); //floor(x) returns the largest integral value that is not greater than x.
//floor of 2.3 is 2.0, floor of 3.8 is 3.0, floor of -2.3 is -3.0, floor of -3.8 is -4.0
*iminz = floor((1.+minz)/grid_step);
*imaxy = ceil((1.+maxy)/grid_step); //ceil(x) returns the smallest integral value that is not less than x.
//ceil of 2.3 is 3, ceil of 3.8 is 4.0, ceil of -2.3 is -2.0, ceil of -3.8 is -3.0
*imaxz = ceil((1.+maxz)/grid_step);
return visible;
}
void spot_scan(double v, double i, double limba1, double limba2, double modif_bis_quad, double modif_bis_lin, double modif_bis_cte, int grid,
               double *vrad_ccf, double *intensity_ccf, double *intensity_ccf_spot, double v_interval, int n_v, int n,
               double s, double longitude, double phase, double latitude,
               int iminy, int iminz, int imaxy,
               int imaxz, double *f_spot_flux, double *f_spot_bconv, double *f_spot_tot, double *sum_spot,
               int magn_feature_type, int T_star, int T_diff_spot)
{
    /* Scan of the yz-area where the spot is.
     * For each grid-point (y,z) we need to check whether it belongs to the spot
     * or not. Sadly, we do not know the projected geometry of the spot in its
     * actual position. Thus, we have to do an inverse rotation to replace the
     * grid point where it would be in the initial configuration. Indeed, in the
     * initial configuration, the spot has a well known geometry of a circle
     * centered on the x-axis.
     *
     * Outputs (accumulated with +=, so the caller must zero-initialize them):
     *   f_spot_flux  [n_v] spot "non-contribution" to the CCF, flux effect only
     *   f_spot_bconv [n_v] "non-contribution", convective blueshift effect only
     *   f_spot_tot   [n_v] "non-contribution", combined effect
     *   sum_spot           "non-contributing" total flux of the active region
     * magn_feature_type: 0 -> spot (T_star - T_diff_spot), 1 -> plage.
     *
     * NOTE(review): modif_bis_quad, modif_bis_lin and modif_bis_cte are not
     * used anywhere in this function body -- presumably kept for interface
     * compatibility with callers; confirm before removing.
     */
    int j, iy, iz,diff_CCF_non_v_and_v,n_v_shifted_quotient;
    double n_v_shifted, n_v_shifted_remainder;
    double y, z;
    double delta_grid=2./grid, delta, r_cos;
    double limb, delta_v = 2.*v_interval/(n_v-1);
    double *xayaza; // actual coordinates
    double *xiyizi; // coordinates transformed back to the initial configuration
    double *intensity_ccf_spot_shift;
    double *intensity_ccf_shift;
    int T_spot,T_plage;
    double intensity,loi_Planck_star;
    //the wavelength of the Kitt peak spectrum goes from 3921.2441 to 6665.5789,
    // the mean being 5293.4115
    loi_Planck_star = loi_Planck(5293.4115e-10, T_star);
    xayaza = (double *)malloc(sizeof(double)*3);
    xiyizi = (double *)malloc(sizeof(double)*3);
    intensity_ccf_shift = (double *)malloc(sizeof(double)*n);
    intensity_ccf_spot_shift = (double *)malloc(sizeof(double)*n);
    // Scan of each cell on the grid
    for (iy=iminy; iy<imaxy; iy++) // y-scan
    {
        y = -1.+iy*delta_grid; // y between -1 et 1
        delta = y * v * sin(i*pi/180.); // Give the velocity of the rotation for the given grid cell as a function of y.
        // For y=-1 => Vrot min, for y=1 => Vrot max, and for y=0 => Vrot = 0.
        xayaza[1] = y;
        for (iz=iminz; iz<imaxz; iz++) // z-scan
        {
            z = -1.+iz*delta_grid; // z between -1 et 1
            if (z*z+y*y<1.) //projected radius on the sky smaller than 1, which means that we are on the stellar disc
            {
                xayaza[0] = pow(1.-(y*y+z*z),.5); //sqrt(r^2-(y^2+z^2)) where r=1. This is equal to 1 at the disc center, and 0 on the limb.
                //This is often referred in the literature as cos(theta)
                xayaza[2] = z;
                // xayaza --> xiyizi: Rotate the star so that the spot is on the disc center
                spot_inverse_rotation(xayaza,longitude,latitude,i,phase,xiyizi);
                // if inside the active region when scanning the grid
                if (xiyizi[0]*xiyizi[0]>=1.-s*s) // x^2 >= 1-s^2, which means that you are inside the active region
                {
                    //limb-darkening
                    r_cos = pow(1.-(y*y+z*z),.5); //sqrt(r^2-(y^2+z^2)) where r=1. This is equal to 1 at the disc center, and 0 on the limb.
                    //This is often referred in the literature as cos(theta)
                    limb = 1. - limba1*(1-r_cos) - limba2*(1-r_cos)*(1-r_cos); //intensity due to limb-darkening (Mandel & Agol 2002)
                    // intensity of the spot (magn_feature_type==0) or the plage (magn_feature_type==1)
                    if (magn_feature_type==0)
                    {
                        T_spot = T_star-T_diff_spot;
                        intensity = loi_Planck(5293.4115e-10,T_spot)/loi_Planck_star; //the wavelength of the Kitt peak spectrum goes from 3921.2441+6665.5789, the mean being 5293.4115
                    }
                    else
                    {
                        T_plage = T_star+250.9-407.7*r_cos+190.9*pow(r_cos,2); //plages are brighter on the limb Meunier 2010
                        intensity = loi_Planck(5293.4115e-10,T_plage)/loi_Planck_star; //the wavelength of the Kitt peak spectrum goes from 3921.2441+6665.5789, the mean being 5293.4115
                    }
                    n_v_shifted = delta/delta_v; // by how much steps the CCF is shifted due to rotation
                    n_v_shifted_quotient = rndup(n_v_shifted,0); // integer number of steps
                    n_v_shifted_remainder = (delta - n_v_shifted_quotient*delta_v); // remainder of the division between delta and the integer number of steps
                    double v_shift = n_v_shifted_remainder;
                    //shifting the CCF with the remainder of n_v_shifted, the quotient will be taken into account by shifting all the points of the spectrum
                    shifting_CCF(vrad_ccf, intensity_ccf, intensity_ccf_shift, v_shift,n);
                    shifting_CCF(vrad_ccf, intensity_ccf_spot, intensity_ccf_spot_shift, v_shift,n);
                    diff_CCF_non_v_and_v = (n_v - n) / 2.; //difference in number of step between the CCF without any rotation and the one with rotation
                    // To take into account the rotation, we increase the width of the CCF. Because the original CCF is only defined between -20 and +20 km/s,
                    // we have to extrapolate on each side of the boundaries of the CCF. In this case, we calculate the "non contribution" to the CCF.
                    // For the region inside the active region, we calculate the contribution of the quiet photosphere and then suppress the contribution of the active region.
                    // It will then be easy to include the contribution of the active region by just subtracting the "non contribution" to the integrated contribution of the
                    // star without active region calculated with the "itot" function
                    for (j=0;j<diff_CCF_non_v_and_v+n_v_shifted_quotient;j++)
                    {
                        // extrapolation on the left of the CCF with the value of the left boundary, weighted by the limb-darkening and the active region intensity
                        // We also consider limb-darkening for spots because we also observe them with different stellar depth depending on their position
                        f_spot_flux[j] += intensity_ccf_shift[0]*limb*(1 - intensity);// only flux effect
                        f_spot_bconv[j] += intensity_ccf_shift[0]*limb*(1 - 1); // only convective blueshift effect. The convective blueshift does not affect the boundaries of the CCF
                        f_spot_tot[j] += intensity_ccf_shift[0]*limb*(1 - intensity);// combined effect
                    }
                    for (j=diff_CCF_non_v_and_v+n_v_shifted_quotient;j<n+(diff_CCF_non_v_and_v+n_v_shifted_quotient);j++)
                    {
                        // value of the CCF, weighted by the limb-darkening and the active region intensity
                        // We also consider limb-darkening for spots because we also observe them with different stellar depth depending on their position
                        f_spot_flux[j] += intensity_ccf_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)]*limb - intensity * (intensity_ccf_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)])*limb; // only flux effect
                        f_spot_bconv[j] += intensity_ccf_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)]*limb - (intensity_ccf_spot_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)])*limb; // only convective blueshift effect
                        f_spot_tot[j] += intensity_ccf_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)]*limb - intensity * (intensity_ccf_spot_shift[j-(diff_CCF_non_v_and_v+n_v_shifted_quotient)])*limb; // combined effect
                    }
                    for (j=n+(diff_CCF_non_v_and_v+n_v_shifted_quotient);j<n_v;j++)
                    {
                        // extrapolation on the right of the CCF with the value of the right boundary, weighted by the limb-darkening and the active region intensity
                        // We also consider limb-darkening for spots because we also observe them with different stellar depth depending on their position
                        f_spot_flux[j] += intensity_ccf_shift[n-1]*limb*(1 - intensity);// only flux effect
                        f_spot_bconv[j] += intensity_ccf_shift[n-1]*limb*(1 - 1); // only convective blueshift effect. The convective blueshift does not affect the boundaries of the CCF
                        f_spot_tot[j] += intensity_ccf_shift[n-1]*limb*(1 - intensity);// combined effect
                    }
                    // calculates the "non contributing" total flux of the active region taking into account the limb-darkening and the active region intensity
                    *sum_spot += intensity_ccf_shift[0]*limb*(1.-intensity);
                }
            }
        }
    }
    free(xayaza); free(xiyizi);
    free(intensity_ccf_shift); free(intensity_ccf_spot_shift);
}
void spot_inverse_rotation(double *xyz, double longitude, double latitude,
                           double inclination, double phase, double *xiyizi)
{
    /*
     * Relocate a point (x,y,z) to the 'initial' configuration,
     * i.e. when the active region is on the disc center.
     *
     * This consists in rotating the point, according to latitude, longitude,
     * inclination and phase, but in the reverse order.
     *
     * Conventions :
     *  - when inclination=0 the star rotates around z axis
     *    (i.e. rotation axis and z axis are indistinct),
     *  - line of sight is along x-axis
     *  - sky plane = yz-plane
     */
    // Inverse phase ([0-1] -> [rad]). The original wrote this as
    // "--phase*(2*pi)", pre-decrementing the parameter as a side effect;
    // made explicit here with identical value.
    double g2 = (phase - 1.) * (2*pi);
    double i = inclination * pi/180.;
    double b = latitude * pi/180.;
    double g = -longitude * pi/180.;
    double b2 = -(pi/2.-i);
    // Hoist the trigonometric evaluations (each sin/cos was recomputed
    // several times in the matrix initializers; values are identical).
    double cg2 = cos(g2), sg2 = sin(g2);
    double ci = cos(i), si = sin(i);
    double cb = cos(b), sb = sin(b);
    double cg = cos(g), sg = sin(g);
    double cb2 = cos(b2), sb2 = sin(b2);
    // Inverse rotation for the phase, around the inclined stellar spin axis
    double R[3][3] = {{(1-cg2)*ci*ci + cg2, sg2*si, (1-cg2)*ci*si},
                      {-sg2*si, cg2, sg2*ci},
                      {(1-cg2)*si*ci, -sg2*ci, (1-cg2)*si*si + cg2}};
    // Inverse rotation for latitude, longitude and inclination
    double R2[3][3] = {{cb*cg*cb2-sb2*sb, -sg*cb, cb*cg*sb2+sb*cb2},
                       {sg*cb2, cg, sg*sb2},
                       {-sb*cg*cb2-cb*sb2, sb*sg, -sb*cg*sb2+cb*cb2}};
    // Combined rotation R3 = R2 x R (same term order as the original
    // hand-expanded products, so results are bit-identical)
    double R3[3][3];
    int r, c;
    for (r = 0; r < 3; r++)
        for (c = 0; c < 3; c++)
            R3[r][c] = R2[r][0]*R[0][c] + R2[r][1]*R[1][c] + R2[r][2]*R[2][c];
    // Coordinates of the point when the active region is at the disc center
    xiyizi[0] = R3[0][0]*xyz[0] + R3[0][1]*xyz[1] + R3[0][2]*xyz[2];
    xiyizi[1] = R3[1][0]*xyz[0] + R3[1][1]*xyz[1] + R3[1][2]*xyz[2];
    xiyizi[2] = R3[2][0]*xyz[0] + R3[2][1]*xyz[1] + R3[2][2]*xyz[2];
}
void spot_scan_npsi(double **xyz, int nrho, double *psi, int npsi, double v,
                    double inclination, double limba1, double limba2,
                    double modif_bis_quad, double modif_bis_lin, double modif_bis_cte,
                    int grid,
                    double *vrad_ccf, double *intensity_ccf,
                    double *intensity_ccf_spot,
                    double v_interval, int n_v, int n, double s,
                    double longitude, double latitude,
                    double **f_spot_flux, double **f_spot_bconv,
                    double **f_spot_tot, double *sum_spot,
                    int magn_feature_type, int T_star, int T_diff_spot)
{
    /*
     * For each phase psi[ipsi], scans the yz-area occupied by the spot and
     * accumulates the spot's "non-contribution" to the total flux and to the
     * CCF. The result is to be subtracted from the output of itot().
     * Prerequisite: xyz was filled by spot_init().
     */
    int ipsi, k;
    int y_lo, z_lo, y_hi, z_hi, on_disc;
    // Scratch buffer holding the phase-rotated circumference (nrho x 3)
    double **rotated = (double **)malloc(sizeof(double *)*nrho);
    for (k = 0; k < nrho; k++)
        rotated[k] = (double *)malloc(sizeof(double)*3);
    for (ipsi = 0; ipsi < npsi; ipsi++)
    {
        // Place the circumference at the current rotational phase
        spot_phase(xyz, inclination, nrho, psi[ipsi], rotated);
        // Bounding box of the visible part; skip fully hidden phases
        on_disc = spot_area(rotated, nrho, grid, &y_lo, &z_lo, &y_hi, &z_hi);
        if (on_disc == 1)
        {
            spot_scan(v, inclination, limba1, limba2, modif_bis_quad, modif_bis_lin, modif_bis_cte, grid,
                      vrad_ccf, intensity_ccf, intensity_ccf_spot, v_interval, n_v, n,
                      s, longitude, psi[ipsi], latitude, y_lo, z_lo, y_hi,
                      z_hi, f_spot_flux[ipsi], f_spot_bconv[ipsi], f_spot_tot[ipsi], &sum_spot[ipsi],
                      magn_feature_type, T_star, T_diff_spot);
        }
    }
    for (k = 0; k < nrho; k++) free(rotated[k]);
    free(rotated);
}
void spot_scan_npsi2(double **xyz, int nrho, double *psi, int npsi, double v,
                     double inclination, double limba1, double limba2,
                     double modif_bis_quad, double modif_bis_lin, double modif_bis_cte,
                     int grid,
                     double *vrad_ccf, double *intensity_ccf,
                     double *intensity_ccf_spot,
                     double v_interval, int n_v, int n, double *s,
                     double longitude, double latitude,
                     double **f_spot_flux, double **f_spot_bconv,
                     double **f_spot_tot, double *sum_spot,
                     int magn_feature_type, int T_star, int T_diff_spot)
{
    /*
     * Like spot_scan_npsi(), but with a per-phase spot radius s[ipsi] and an
     * OpenMP-parallel phase loop. Accumulates the spot's "non-contribution"
     * to the flux and CCF for each phase; subtract from the itot() output.
     *
     * FIX: the scratch buffer xyz2 used to be allocated once and shared by
     * all threads, so concurrent iterations of the omp-for loop overwrote
     * each other's rotated circumference (a data race). Each thread now
     * allocates and frees its own private buffer inside the parallel region.
     */
    int ipsi; // omp-for loop index: implicitly private per the OpenMP spec
    #pragma omp parallel if(npsi>99)
    {
        int j;
        int iminy, iminz, imaxy, imaxz, vis; // private: declared in the region
        // Thread-private scratch buffer (nrho x 3)
        double **xyz2 = (double **)malloc(sizeof(double *)*nrho);
        for (j=0; j<nrho; j++) xyz2[j] = (double *)malloc(sizeof(double)*3);
        #pragma omp for
        for (ipsi=0; ipsi<npsi; ipsi++)
        {
            spot_phase(xyz, inclination, nrho, psi[ipsi], xyz2);
            vis = spot_area(xyz2, nrho, grid, &iminy, &iminz, &imaxy, &imaxz);
            if (vis==1)
            {
                spot_scan(v, inclination, limba1, limba2, modif_bis_quad, modif_bis_lin, modif_bis_cte, grid,
                          vrad_ccf, intensity_ccf, intensity_ccf_spot, v_interval, n_v, n,
                          s[ipsi], longitude, psi[ipsi], latitude, iminy, iminz, imaxy, imaxz,
                          f_spot_flux[ipsi], f_spot_bconv[ipsi], f_spot_tot[ipsi], &sum_spot[ipsi],
                          magn_feature_type, T_star, T_diff_spot);
            }
        }
        for (j=0; j<nrho; j++) free(xyz2[j]);
        free(xyz2);
    }
}
|
fractional_step_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#ifndef KRATOS_FRACTIONAL_STEP_STRATEGY
#define KRATOS_FRACTIONAL_STEP_STRATEGY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/cfd_variables.h"
#include "processes/process.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "utilities/variable_utils.h"
#include "utilities/entities_utilities.h"
// Application includes
#include "custom_utilities/solver_settings.h"
#include "fluid_dynamics_application_variables.h"
namespace Kratos {
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @brief Fractional-step strategy for incompressible Navier-Stokes formulation
* This strategy implements a splitting scheme for the incompressible Navier-Stokes equations.
* It is intended to be used in combination with the FractionalStep element in the FluidDynamicsApplication.
* The fractional step index, which is stored in the ProcessInfo, takes the values
* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
* @tparam TSparseSpace Sparse space template type
* @tparam TDenseSpace Dense space template type
* @tparam TLinearSolver Linear solver template type
*/
template <class TSparseSpace, class TDenseSpace, class TLinearSolver>
class FractionalStepStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
/// Counted pointer of FractionalStepStrategy
KRATOS_CLASS_POINTER_DEFINITION(FractionalStepStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef SolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// @brief Deprecated constructor (reactions calculation disabled, no periodic variable).
/// @param rModelPart Model part holding the fluid mesh and solution data
/// @param rSolverConfig Settings providing the momentum and pressure solution strategies
/// @param PredictorCorrector If true, the pressure solution is iterated to convergence
FractionalStepStrategy(ModelPart& rModelPart,
                       SolverSettingsType& rSolverConfig,
                       bool PredictorCorrector):
    BaseType(rModelPart,false),
    mCalculateReactionsFlag(false),
    mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
    KRATOS_WARNING("FractionalStepStrategy") << "This constructor is deprecated. Use the one with the \'CalculateReactionsFlag\' instead." << std::endl;
    InitializeStrategy(rSolverConfig,PredictorCorrector);
}
/// @brief Deprecated constructor with periodic-boundary support (reactions disabled).
/// @param PeriodicVar Variable stored in mrPeriodicIdVar; presumably identifies
/// paired periodic nodes -- confirm against the periodic-condition utilities.
FractionalStepStrategy(ModelPart& rModelPart,
                       SolverSettingsType& rSolverConfig,
                       bool PredictorCorrector,
                       const Kratos::Variable<int>& PeriodicVar):
    BaseType(rModelPart,false),
    mCalculateReactionsFlag(false),
    mrPeriodicIdVar(PeriodicVar)
{
    KRATOS_WARNING("FractionalStepStrategy") << "This constructor is deprecated. Use the one with the \'CalculateReactionsFlag\' instead." << std::endl;
    InitializeStrategy(rSolverConfig,PredictorCorrector);
}
/// @brief Preferred constructor.
/// @param CalculateReactionsFlag If true, REACTION is computed after each solved step
FractionalStepStrategy(
    ModelPart& rModelPart,
    SolverSettingsType& rSolverConfig,
    bool PredictorCorrector,
    bool CalculateReactionsFlag)
    : BaseType(rModelPart,false)
    , mCalculateReactionsFlag(CalculateReactionsFlag)
    , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
    InitializeStrategy(rSolverConfig,PredictorCorrector);
}
/// @brief Preferred constructor for periodic-boundary problems.
/// @param CalculateReactionsFlag If true, REACTION is computed after each solved step
/// @param PeriodicVar Variable stored in mrPeriodicIdVar; presumably identifies
/// paired periodic nodes -- confirm against the periodic-condition utilities.
FractionalStepStrategy(
    ModelPart& rModelPart,
    SolverSettingsType& rSolverConfig,
    bool PredictorCorrector,
    bool CalculateReactionsFlag,
    const Kratos::Variable<int>& PeriodicVar)
    : BaseType(rModelPart,false)
    , mCalculateReactionsFlag(CalculateReactionsFlag)
    , mrPeriodicIdVar(PeriodicVar)
{
    InitializeStrategy(rSolverConfig,PredictorCorrector);
}
/// Destructor. Empty body: the internal strategies are held through
/// StrategyPointerType (a Kratos `Pointer` typedef), so no manual cleanup here.
~FractionalStepStrategy() override{}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// @brief Initializes the strategy before solving.
/// Propagates the SLIP flag from SLIP-marked conditions to their nodes
/// (under nodal locks, since neighboring conditions may share nodes and the
/// loop runs in parallel), then initializes all elements and conditions.
void Initialize() override
{
    // Set up nodes to use slip conditions if needed.
    if (mUseSlipConditions) {
        auto& r_model_part = BaseType::GetModelPart();
        const int n_conds = r_model_part.NumberOfConditions();
#pragma omp parallel for
        for (int i_cond = 0; i_cond < n_conds; ++i_cond) {
            auto it_cond = r_model_part.ConditionsBegin() + i_cond;
            if (it_cond->Is(SLIP)) {
                auto& r_geom = it_cond->GetGeometry();
                for (auto& r_node : r_geom) {
                    // Lock: two threads may flag the same shared node
                    r_node.SetLock();
                    r_node.Set(SLIP, true);
                    r_node.UnSetLock();
                }
            }
        }
    }
    // Initialize all the elements and conditions
    EntitiesUtilities::InitializeAllEntities(BaseType::GetModelPart());
}
/// @brief Performs sanity checks on the base strategy, the buffer size and all entities.
/// @return 0 if everything is consistent, the first non-zero error code otherwise.
int Check() override
{
    KRATOS_TRY;
    // Base strategy check
    int ierr = BaseType::Check();
    if (ierr != 0) {
        return ierr;
    }
    // Check time order and buffer size
    const auto& r_model_part = BaseType::GetModelPart();
    KRATOS_ERROR_IF(mTimeOrder == 2 && r_model_part.GetBufferSize() < 3)
        << "Buffer size too small for fractional step strategy (BDF2), needed 3, got " << r_model_part.GetBufferSize() << std::endl;
    KRATOS_ERROR_IF(mTimeOrder == 1 && r_model_part.GetBufferSize() < 2)
        << "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got " << r_model_part.GetBufferSize() << std::endl;
    // Check elements and conditions
    const auto &r_current_process_info = r_model_part.GetProcessInfo();
    for (const auto& r_element : r_model_part.Elements()) {
        ierr = r_element.Check(r_current_process_info);
        if (ierr != 0) {
            // FIX: return immediately. Previously this only broke out of the
            // loop and the condition loop below then overwrote ierr, so an
            // element error could be silently masked by passing conditions.
            return ierr;
        }
    }
    for (const auto& r_condition : r_model_part.Conditions()) {
        ierr = r_condition.Check(r_current_process_info);
        if (ierr != 0) {
            return ierr;
        }
    }
    return ierr;
    KRATOS_CATCH("");
}
/// @brief Prepares the current solution step.
/// Recomputes the BDF time-integration coefficients from the current and
/// previous time step sizes (see SetTimeCoefficients), so variable time
/// stepping is handled correctly.
void InitializeSolutionStep() override
{
    // Initialize BDF2 coefficients
    SetTimeCoefficients();
}
/// @brief Solves one time step of the fractional-step scheme.
/// In predictor-corrector mode, SolveStep() is repeated until the pressure
/// increment converges (up to mMaxPressureIter iterations); otherwise a
/// single pass is performed. Optionally computes reactions afterwards.
/// @return true if the step is considered converged
bool SolveSolutionStep() override
{
    bool converged = false;
    if (mPredictorCorrector) {
        const unsigned int echo_level = BaseType::GetEchoLevel();
        // Iterative solution for pressure
        for (unsigned int it = 0; it < mMaxPressureIter; ++it) {
            KRATOS_INFO_IF("FractionalStepStrategy", echo_level > 1) << "Pressure iteration " << it << std::endl;
            const auto convergence_output = this->SolveStep();
            // tuple element 1: pressure convergence datum from SolveStep()
            converged = this->CheckPressureConvergence(std::get<1>(convergence_output));
            if (converged) {
                KRATOS_INFO_IF("FractionalStepStrategy", echo_level > 0) << "Predictor-corrector converged in " << it + 1 << " iterations." << std::endl;
                break;
            }
        }
        KRATOS_WARNING_IF("FractionalStepStrategy", !converged && echo_level > 0) << "Predictor-corrector iterations did not converge." << std::endl;
    } else {
        // Solve for fractional step velocity, then update pressure once
        const auto convergence_output = this->SolveStep();
        // If not doing predictor corrector iterations, norm_dp will
        // typically be "large" since we are not iterating on pressure.
        // It makes no sense to report that the iteration didn't converge
        // based on this. Hence, what we report is the convergence of the
        // fractional step velocity.
        converged = std::get<0>(convergence_output);
    }
    // Calculate reactions
    if (mCalculateReactionsFlag) {
        CalculateReactions();
    }
    return converged;
}
/// @brief Finalizes the current solution step.
/// If the DOF set must be reformed (mReformDofSet), the internal momentum
/// and pressure strategies are cleared so their systems are rebuilt on the
/// next step.
void FinalizeSolutionStep() override
{
    if (mReformDofSet) {
        this->Clear();
    }
}
//TODO: Move to private section as soon as we remove the Python exposure
/**
 * @brief Calculates the reactions
 * This methods calculates the reactions of the momentum equation.
 * These are computed as minus the RHS and saved in the REACTION variable
 */
virtual void CalculateReactions()
{
    auto &r_model_part = BaseType::GetModelPart();
    auto &r_process_info = r_model_part.GetProcessInfo();
    const int n_elems = r_model_part.NumberOfElements();
    // Set fractional step index to the momentum equation step
    // (FRACTIONAL_STEP == 1 selects the momentum residual in the elements)
    const int original_step = r_process_info[FRACTIONAL_STEP];
    r_process_info.SetValue(FRACTIONAL_STEP, 1);
    // Allocate and initialize values for REACTION calculation
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    const auto &r_const_process_info = r_process_info;
    VariableUtils().SetHistoricalVariableToZero(REACTION, r_model_part.Nodes());
#pragma omp parallel for private(RHS_Contribution, LHS_Contribution)
    for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
        // Build local system
        auto it_elem = r_model_part.ElementsBegin() + i_elem;
        it_elem->CalculateLocalSystem(
            LHS_Contribution,
            RHS_Contribution,
            r_const_process_info);
        // Accumulate minus the RHS as the reaction
        // index walks the local RHS assuming mDomainSize entries per node,
        // in node order (velocity block first) -- matches the element layout
        unsigned int index = 0;
        auto& r_geom = it_elem->GetGeometry();
        const unsigned int n_nodes = r_geom.PointsNumber();
        for (unsigned int i = 0; i < n_nodes; ++i) {
            // Lock: adjacent elements may accumulate into the same node
            r_geom[i].SetLock();
            auto& r_reaction = r_geom[i].FastGetSolutionStepValue(REACTION);
            for (unsigned int d = 0; d < mDomainSize; ++d) {
                r_reaction[d] -= RHS_Contribution[index++];
            }
            r_geom[i].UnSetLock();
        }
    }
    // Synchronize the local REACTION values
    r_model_part.GetCommunicator().AssembleCurrentData(REACTION);
    // Reset original fractional step index
    r_process_info.SetValue(FRACTIONAL_STEP, original_step);
}
/// @brief Registers an extra Process to be run as part of the solution iterations.
/// @param pNewStep Pointer to the process to append
virtual void AddIterationStep(Process::Pointer pNewStep)
{
    mExtraIterationSteps.push_back(pNewStep);
}
/// @brief Removes all extra iteration processes registered via AddIterationStep.
virtual void ClearExtraIterationSteps()
{
    mExtraIterationSteps.clear();
}
/// @brief Clears the internal momentum and pressure strategies
/// (their systems are rebuilt on the next solve).
void Clear() override
{
    mpMomentumStrategy->Clear();
    mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// @brief Sets the verbosity level, propagating a reduced level to the
/// internal momentum and pressure strategies (one level quieter, floored at 0).
void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
    int inner_level;
    if (Level > 0) {
        inner_level = Level - 1;
    } else {
        inner_level = 0;
    }
    mpMomentumStrategy->SetEchoLevel(inner_level);
    mpPressureStrategy->SetEchoLevel(inner_level);
}
/**
 * @brief This method sets the flag mCalculateReactionsFlag
 * When enabled, CalculateReactions() is called at the end of SolveSolutionStep().
 * @param CalculateReactionsFlag The flag that tells if the reactions are computed
 */
void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
{
    mCalculateReactionsFlag = CalculateReactionsFlag;
}
/**
 * @brief This method returns the flag mCalculateReactionsFlag
 * @return The flag that tells if the reactions are computed
 */
bool GetCalculateReactionsFlag()
{
    return mCalculateReactionsFlag;
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // The identifier is a fixed string, so return it directly instead of streaming it.
    return "FractionalStepStrategy";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info(); // delegate to Info() so both outputs stay consistent
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override {} // nothing beyond Info() to report
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
double mVelocityTolerance;                  // relative tolerance for the fractional velocity iterations (CheckFractionalStepConvergence)
double mPressureTolerance;                  // relative tolerance used by CheckPressureConvergence
double mPressureGradientRelaxationFactor;   // relaxation applied to the old pressure in SolveStep (FS_PRESSURE_GRADIENT_RELAXATION_FACTOR)
unsigned int mMaxVelocityIter;              // maximum momentum iterations per solution step
unsigned int mMaxPressureIter;              // maximum pressure iterations per solution step
unsigned int mDomainSize;                   // spatial dimension (2 or 3)
unsigned int mTimeOrder;                    // BDF time integration order (1 or 2), see SetTimeCoefficients
bool mPredictorCorrector;                   // set from the solver configuration; TODO(review) confirm where it is consumed
bool mUseSlipConditions;                    // if true, EnforceSlipCondition(SLIP) is applied to the end-of-step velocity
bool mReformDofSet;                         // set from the solver configuration; presumably triggers DOF set rebuilds -- verify against base class
bool mCalculateReactionsFlag;               // if true, reactions are computed after the solution step
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
std::vector< Process::Pointer > mExtraIterationSteps;   // processes executed at the end of each SolveStep
const Kratos::Variable<int>& mrPeriodicIdVar;           // variable holding the periodic-pair node id (unset key disables periodic handling)
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Set the Time Coefficients object
* Calculate the coefficients for the BDF2 time iteration.
* These are stored in the BDF_COEFFICIENTS variable of the ProcessInfo container.
*/
void SetTimeCoefficients()
{
    KRATOS_TRY;
    auto &r_process_info = (BaseType::GetModelPart()).GetProcessInfo();
    if (mTimeOrder == 2)
    {
        //calculate the BDF coefficients
        // Variable-step BDF2: Rho is the ratio between the previous and the
        // current time step sizes; the coefficients reduce to (3/2Dt, -4/2Dt, 1/2Dt)
        // when the step is constant.
        double Dt = r_process_info[DELTA_TIME];
        double OldDt = r_process_info.GetPreviousTimeStepInfo(1)[DELTA_TIME];
        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
        Vector& BDFcoeffs = r_process_info[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);
        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        // Backward Euler (BDF1)
        double Dt = r_process_info[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;
        Vector& BDFcoeffs = r_process_info[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);
        BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
    }
    // NOTE(review): any other mTimeOrder silently leaves BDF_COEFFICIENTS
    // untouched -- confirm this is validated elsewhere (e.g. in Check()).
    KRATOS_CATCH("");
}
// Performs one full fractional step solve:
//   1) iterate the momentum equation until the fractional velocity converges,
//   2) compute OSS projections and solve the pressure equation,
//   3) compute the end-of-step velocity and run the extra iteration steps.
// Returns (fractional velocity converged?, pressure increment norm).
virtual std::tuple<bool,double> SolveStep()
{
    ModelPart& rModelPart = BaseType::GetModelPart();
    const int n_nodes = rModelPart.NumberOfNodes();
    // 1. Fractional step momentum iteration
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
    bool Converged = false;
    for(unsigned int it = 0; it < mMaxVelocityIter; ++it)
    {
        KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 1) << "Momentum iteration " << it << std::endl;
        // build momentum system and solve for fractional step velocity increment
        rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
        double NormDv = mpMomentumStrategy->Solve();
        // // Compute projections (for stabilization)
        // rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4);
        // this->ComputeSplitOssProjections(rModelPart);
        // // Additional steps // Moved to end of step
        // for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin();
        // iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps)
        // (*iExtraSteps)->Execute();
        // Check convergence
        Converged = this->CheckFractionalStepConvergence(NormDv);
        if (Converged)
        {
            KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Fractional velocity converged in " << it + 1 << " iterations." << std::endl;
            break;
        }
    }
    KRATOS_INFO_IF("FractionalStepStrategy", !Converged && BaseType::GetEchoLevel() > 0) << "Fractional velocity iterations did not converge." << std::endl;
    // Compute projections (for stabilization)
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4);
    this->ComputeSplitOssProjections(rModelPart);
    // 2. Pressure solution (store pressure variation in PRESSURE_OLD_IT)
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5);
    // Seed PRESSURE_OLD_IT with minus the relaxed old pressure; after the solve
    // it is incremented by the new pressure, leaving the (relaxed) variation.
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; ++i_node) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        const double old_press = it_node->FastGetSolutionStepValue(PRESSURE);
        it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -mPressureGradientRelaxationFactor * old_press;
    }
    KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Calculating Pressure." << std::endl;
    double NormDp = mpPressureStrategy->Solve();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; ++i_node) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) += it_node->FastGetSolutionStepValue(PRESSURE);
    }
    // 3. Compute end-of-step velocity
    KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Updating Velocity." << std::endl;
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,6);
    this->CalculateEndOfStepVelocity();
    /*
    mpPressureStrategy->Clear();
    double NormDu = mpPressureStrategy->Solve();
    mpPressureStrategy->Clear();
    */
    // Additional steps
    for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin();
         iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps)
        (*iExtraSteps)->Execute();
    // Set the output tuple as the fractional velocity convergence and pressure norm
    return std::make_tuple(Converged, NormDp);
}
// Relative convergence check for the fractional velocity: the increment norm
// NormDv is compared against the (MPI-reduced) norm of the current velocity
// field; if the latter vanishes the check falls back to an absolute one.
bool CheckFractionalStepConvergence(const double NormDv)
{
    ModelPart& r_model_part = BaseType::GetModelPart();
    const int num_nodes = r_model_part.NumberOfNodes();
    double norm_v = 0.00;
    #pragma omp parallel for reduction(+:norm_v)
    for (int i = 0; i < num_nodes; ++i) {
        const auto it_node = r_model_part.NodesBegin() + i;
        const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY);
        norm_v += r_vel[0] * r_vel[0] + r_vel[1] * r_vel[1] + r_vel[2] * r_vel[2];
    }
    norm_v = r_model_part.GetCommunicator().GetDataCommunicator().SumAll(norm_v);
    norm_v = sqrt(norm_v);
    const double zero_tol = 1.0e-12;
    const double ratio = (norm_v < zero_tol) ? NormDv : NormDv / norm_v;
    KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "CONVERGENCE CHECK:" << std::endl;
    KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0)
        << std::scientific << std::setprecision(8) << "FRAC VEL.: ratio = " << ratio <<"; exp.ratio = " << mVelocityTolerance << " abs = " << NormDv << std::endl;
    return ratio < mVelocityTolerance;
}
// Relative convergence check for the pressure: NormDp is compared against the
// (MPI-reduced) norm of the current pressure field, falling back to an
// absolute check when that norm vanishes.
bool CheckPressureConvergence(const double NormDp)
{
    ModelPart& r_model_part = BaseType::GetModelPart();
    const int num_nodes = r_model_part.NumberOfNodes();
    double norm_p = 0.00;
    #pragma omp parallel for reduction(+:norm_p)
    for (int i = 0; i < num_nodes; ++i) {
        const auto it_node = r_model_part.NodesBegin() + i;
        const double p = it_node->FastGetSolutionStepValue(PRESSURE);
        norm_p += p * p;
    }
    norm_p = r_model_part.GetCommunicator().GetDataCommunicator().SumAll(norm_p);
    norm_p = sqrt(norm_p);
    const double zero_tol = 1.0e-12;
    const double ratio = (norm_p < zero_tol) ? NormDp : NormDp / norm_p;
    KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Pressure relative error: " << ratio << std::endl;
    return ratio < mPressureTolerance;
}
// Computes the split OSS projections (CONV_PROJ, PRESS_PROJ, DIVPROJ) and the
// lumped NODAL_AREA: nodal values are zeroed, elemental contributions are
// assembled, periodic pairs are corrected, and the result is divided by the
// nodal area.
virtual void ComputeSplitOssProjections(ModelPart& rModelPart)
{
    const int n_nodes = rModelPart.NumberOfNodes();
    const int n_elems = rModelPart.NumberOfElements();
    // Zero the nodal containers that the elements accumulate into
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; ++i_node) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        it_node->FastGetSolutionStepValue(CONV_PROJ) = CONV_PROJ.Zero();
        it_node->FastGetSolutionStepValue(PRESS_PROJ) = PRESS_PROJ.Zero();
        it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
        it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
    }
    // Elemental contributions. The dummy output argument is declared inside
    // the loop body so that each thread owns its own copy: the previous single
    // shared instance was written concurrently by all threads (a data race).
    #pragma omp parallel for
    for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
        const auto it_elem = rModelPart.ElementsBegin() + i_elem;
        array_1d<double,3> out = ZeroVector(3); // per-thread scratch output
        it_elem->Calculate(CONV_PROJ, out, rModelPart.GetProcessInfo());
    }
    rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ);
    rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ);
    rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
    rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
    // If there are periodic conditions, add contributions from both sides to the periodic nodes
    this->PeriodicConditionProjectionCorrection(rModelPart);
    // Divide the assembled projections by the lumped nodal area
    #pragma omp parallel for
    for (int i_node = 0; i_node < n_nodes; ++i_node) {
        auto it_node = rModelPart.NodesBegin() + i_node;
        const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA);
        it_node->FastGetSolutionStepValue(CONV_PROJ) /= NodalArea;
        it_node->FastGetSolutionStepValue(PRESS_PROJ) /= NodalArea;
        it_node->FastGetSolutionStepValue(DIVPROJ) /= NodalArea;
    }
}
virtual void CalculateEndOfStepVelocity()
{
ModelPart& rModelPart = BaseType::GetModelPart();
const int n_nodes = rModelPart.NumberOfNodes();
const int n_elems = rModelPart.NumberOfElements();
array_1d<double,3> Out = ZeroVector(3);
VariableUtils().SetHistoricalVariableToZero(FRACT_VEL, rModelPart.Nodes());
#pragma omp parallel for
for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
const auto it_elem = rModelPart.ElementsBegin() + i_elem;
it_elem->Calculate(VELOCITY, Out, rModelPart.GetProcessInfo());
}
rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL);
this->PeriodicConditionVelocityCorrection(rModelPart);
// Force the end of step velocity to verify slip conditions in the model
if (mUseSlipConditions)
this->EnforceSlipCondition(SLIP);
if (mDomainSize > 2)
{
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto it_node = rModelPart.NodesBegin() + i_node;
const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA);
if ( ! it_node->IsFixed(VELOCITY_X) )
it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea;
if ( ! it_node->IsFixed(VELOCITY_Y) )
it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea;
if ( ! it_node->IsFixed(VELOCITY_Z) )
it_node->FastGetSolutionStepValue(VELOCITY_Z) += it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea;
}
}
else
{
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto it_node = rModelPart.NodesBegin() + i_node;
const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA);
if ( ! it_node->IsFixed(VELOCITY_X) )
it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea;
if ( ! it_node->IsFixed(VELOCITY_Y) )
it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea;
}
}
}
/**
* @brief Substract wall-normal component of velocity update to ensure that the final velocity satisfies slip conditions.
* @param rSlipWallFlag If Node.Is(rSlipWallFlag) == true, the node is in the wall.
*/
// Removes the wall-normal component of the velocity update (FRACT_VEL) on
// every node flagged with rSlipWallFlag, so the final velocity satisfies the
// slip condition.
void EnforceSlipCondition(const Kratos::Flags& rSlipWallFlag)
{
    ModelPart& r_model_part = BaseType::GetModelPart();
    const int n_nodes = r_model_part.NumberOfNodes();
    #pragma omp parallel for
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodeIterator it_node = r_model_part.NodesBegin() + i;
        const Node<3>& r_const_node = *it_node;
        if ( !r_const_node.Is(rSlipWallFlag) )
        {
            continue; // not a wall node
        }
        const array_1d<double,3>& r_normal = it_node->FastGetSolutionStepValue(NORMAL);
        array_1d<double,3>& r_delta_vel = it_node->FastGetSolutionStepValue(FRACT_VEL);
        // Project the velocity update onto the (non-normalized) nodal normal
        double proj = 0.0;
        double norm2 = 0.0;
        for (unsigned int d = 0; d < mDomainSize; ++d)
        {
            proj += r_normal[d] * r_delta_vel[d];
            norm2 += r_normal[d] * r_normal[d];
        }
        proj /= norm2;
        r_delta_vel -= proj * r_normal;
    }
}
/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
* both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
* 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
* 2- The non-historical containers are added across processes, transmiting the right value from the condition owner to all partitions.\n
* 3- The value on all periodic nodes is replaced by the one received in step 2.
*/
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
    Communicator& r_comm = rModelPart.GetCommunicator();
    // A default-constructed mrPeriodicIdVar key means "no periodic conditions": do nothing.
    if (mrPeriodicIdVar.Key() != Kratos::Variable<int>::StaticObject().Key())
    {
        int GlobalNodesNum = r_comm.LocalMesh().Nodes().size();
        GlobalNodesNum = r_comm.GetDataCommunicator().SumAll(GlobalNodesNum);
        for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
        {
            ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
            if (rGeom.PointsNumber() == 2)
            {
                // Two-noded condition: sum both sides of the periodic pair into
                // the NON-historical containers (assembled across ranks below).
                Node<3>& rNode0 = rGeom[0];
                int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                Node<3>& rNode1 = rGeom[1];
                int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);
                // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                {
                    double NodalArea = rNode0.FastGetSolutionStepValue(NODAL_AREA) + rNode1.FastGetSolutionStepValue(NODAL_AREA);
                    array_1d<double,3> ConvProj = rNode0.FastGetSolutionStepValue(CONV_PROJ) + rNode1.FastGetSolutionStepValue(CONV_PROJ);
                    array_1d<double,3> PressProj = rNode0.FastGetSolutionStepValue(PRESS_PROJ) + rNode1.FastGetSolutionStepValue(PRESS_PROJ);
                    double DivProj = rNode0.FastGetSolutionStepValue(DIVPROJ) + rNode1.FastGetSolutionStepValue(DIVPROJ);
                    rNode0.GetValue(NODAL_AREA) = NodalArea;
                    rNode0.GetValue(CONV_PROJ) = ConvProj;
                    rNode0.GetValue(PRESS_PROJ) = PressProj;
                    rNode0.GetValue(DIVPROJ) = DivProj;
                    rNode1.GetValue(NODAL_AREA) = NodalArea;
                    rNode1.GetValue(CONV_PROJ) = ConvProj;
                    rNode1.GetValue(PRESS_PROJ) = PressProj;
                    rNode1.GetValue(DIVPROJ) = DivProj;
                }
            }
            // Four-noded case: presumably an edge/corner periodic group -- the
            // id > GlobalNodesNum test looks like a marker for such groups;
            // TODO(review) confirm against the PeriodicCondition documentation.
            else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
            {
                double NodalArea = rGeom[0].FastGetSolutionStepValue(NODAL_AREA);
                array_1d<double,3> ConvProj = rGeom[0].FastGetSolutionStepValue(CONV_PROJ);
                array_1d<double,3> PressProj = rGeom[0].FastGetSolutionStepValue(PRESS_PROJ);
                double DivProj = rGeom[0].FastGetSolutionStepValue(DIVPROJ);
                for (unsigned int i = 1; i < 4; i++)
                {
                    NodalArea += rGeom[i].FastGetSolutionStepValue(NODAL_AREA);
                    ConvProj += rGeom[i].FastGetSolutionStepValue(CONV_PROJ);
                    PressProj += rGeom[i].FastGetSolutionStepValue(PRESS_PROJ);
                    DivProj += rGeom[i].FastGetSolutionStepValue(DIVPROJ);
                }
                for (unsigned int i = 0; i < 4; i++)
                {
                    rGeom[i].GetValue(NODAL_AREA) = NodalArea;
                    rGeom[i].GetValue(CONV_PROJ) = ConvProj;
                    rGeom[i].GetValue(PRESS_PROJ) = PressProj;
                    rGeom[i].GetValue(DIVPROJ) = DivProj;
                }
            }
        }
        // Propagate the condition-owner values to all ranks
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
        // Copy the corrected values back into the historical database on every
        // periodic node (identified by a non-zero non-historical NODAL_AREA),
        // then reset the non-historical containers for the next call.
        for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
        {
            if (itNode->GetValue(NODAL_AREA) != 0.0)
            {
                itNode->FastGetSolutionStepValue(NODAL_AREA) = itNode->GetValue(NODAL_AREA);
                itNode->FastGetSolutionStepValue(CONV_PROJ) = itNode->GetValue(CONV_PROJ);
                itNode->FastGetSolutionStepValue(PRESS_PROJ) = itNode->GetValue(PRESS_PROJ);
                itNode->FastGetSolutionStepValue(DIVPROJ) = itNode->GetValue(DIVPROJ);
                // reset for next iteration
                itNode->GetValue(NODAL_AREA) = 0.0;
                itNode->GetValue(CONV_PROJ) = CONV_PROJ.Zero();
                itNode->GetValue(PRESS_PROJ) = PRESS_PROJ.Zero();
                itNode->GetValue(DIVPROJ) = 0.0;
            }
        }
    }
}
// Same scheme as PeriodicConditionProjectionCorrection, applied to FRACT_VEL:
// sum the velocity update over each periodic pair/group via the non-historical
// container, assemble across ranks, then write back and reset.
void PeriodicConditionVelocityCorrection(ModelPart& rModelPart)
{
    Communicator& r_comm = rModelPart.GetCommunicator();
    // A default-constructed mrPeriodicIdVar key means "no periodic conditions": do nothing.
    if (mrPeriodicIdVar.Key() != Kratos::Variable<int>::StaticObject().Key())
    {
        int GlobalNodesNum = r_comm.LocalMesh().Nodes().size();
        GlobalNodesNum = r_comm.GetDataCommunicator().SumAll(GlobalNodesNum);
        for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
        {
            ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
            if (rGeom.PointsNumber() == 2)
            {
                Node<3>& rNode0 = rGeom[0];
                int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                Node<3>& rNode1 = rGeom[1];
                int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);
                // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                {
                    array_1d<double,3> DeltaVel = rNode0.FastGetSolutionStepValue(FRACT_VEL) + rNode1.FastGetSolutionStepValue(FRACT_VEL);
                    rNode0.GetValue(FRACT_VEL) = DeltaVel;
                    rNode1.GetValue(FRACT_VEL) = DeltaVel;
                }
            }
            // Four-noded periodic group (see note in PeriodicConditionProjectionCorrection)
            else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
            {
                array_1d<double,3> DeltaVel = rGeom[0].FastGetSolutionStepValue(FRACT_VEL);
                for (unsigned int i = 1; i < 4; i++)
                {
                    DeltaVel += rGeom[i].FastGetSolutionStepValue(FRACT_VEL);
                }
                for (unsigned int i = 0; i < 4; i++)
                {
                    rGeom[i].GetValue(FRACT_VEL) = DeltaVel;
                }
            }
        }
        rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL);
        // Write back on periodic nodes (identified by a non-zero summed value)
        // and reset the non-historical container for the next call.
        for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
        {
            array_1d<double,3>& rDeltaVel = itNode->GetValue(FRACT_VEL);
            if ( rDeltaVel[0]*rDeltaVel[0] + rDeltaVel[1]*rDeltaVel[1] + rDeltaVel[2]*rDeltaVel[2] != 0.0)
            {
                itNode->FastGetSolutionStepValue(FRACT_VEL) = itNode->GetValue(FRACT_VEL);
                rDeltaVel = ZeroVector(3);
            }
        }
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Reads the solver configuration, sets up the pressure gradient relaxation
// factor, and retrieves the momentum and pressure sub-strategies together with
// their tolerances and iteration limits. Throws if either strategy is missing.
void InitializeStrategy(
    SolverSettingsType& rSolverConfig,
    bool PredictorCorrector)
{
    KRATOS_TRY;
    mTimeOrder = rSolverConfig.GetTimeOrder();
    mDomainSize = rSolverConfig.GetDomainSize();
    mPredictorCorrector = PredictorCorrector;
    mUseSlipConditions = rSolverConfig.UseSlipConditions();
    mReformDofSet = rSolverConfig.GetReformDofSet();
    auto& r_process_info = BaseType::GetModelPart().GetProcessInfo();
    // Pressure gradient relaxation: read it if present, otherwise default to
    // 1.0 and publish the value in the ProcessInfo for the elements to use.
    if (r_process_info.Has(FS_PRESSURE_GRADIENT_RELAXATION_FACTOR)) {
        mPressureGradientRelaxationFactor = r_process_info[FS_PRESSURE_GRADIENT_RELAXATION_FACTOR];
        KRATOS_INFO("FractionalStepStrategy") << "Using fractional step strategy with "
                                                 "pressure gradient relaxation = "
                                              << mPressureGradientRelaxationFactor << ".\n";
    } else {
        mPressureGradientRelaxationFactor = 1.0;
        r_process_info.SetValue(FS_PRESSURE_GRADIENT_RELAXATION_FACTOR, mPressureGradientRelaxationFactor);
    }
    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
    // Initialize strategies for each step
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy);
    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter);
    }
    else
    {
        KRATOS_ERROR << "FractionalStepStrategy error: No Velocity strategy defined in FractionalStepSettings" << std::endl;
    }
    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy);
    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter);
    }
    else
    {
        KRATOS_ERROR << "FractionalStepStrategy error: No Pressure strategy defined in FractionalStepSettings" << std::endl;
    }
    Process::Pointer pTurbulenceProcess;
    bool HaveTurbulence = rSolverConfig.GetTurbulenceModel(pTurbulenceProcess);
    if (HaveTurbulence)
        mExtraIterationSteps.push_back(pTurbulenceProcess);
    // Check input parameters once everything is initialized. (A second,
    // redundant call to Check() issued before any member was set has been
    // removed: this single call validates the fully-initialized strategy.)
    this->Check();
    KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (deleted: the strategy is not copyable).
/// The previous declaration had an empty body with no return statement,
/// which is undefined behavior if it is ever invoked.
FractionalStepStrategy& operator=(FractionalStepStrategy const& rOther) = delete;
/// Copy constructor (deleted: the strategy is not copyable).
FractionalStepStrategy(FractionalStepStrategy const& rOther) = delete;
///@}
}; /// Class FStepStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_FRACTIONAL_STEP_STRATEGY
|
oski.c | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "util.h"
/**
* \brief Given an \f$m\times n\f$ CSR matrix \f$A\f$,
* estimates the fill ratio if the matrix were converted
* into \f$r\times c\f$ BCSR format.
*
* The caller supplies this routine with a maximum column
* block size \f$C\f$, and this routine returns the
* estimated fill ratios for all \f$1 \leq c \leq C\f$.
*
* If the converted matrix has \f$n_b\f$ blocks, this
* implementation executes in
* \f$O(\mbox{stored non-zeros}) = O(n_b\cdot r\cdot c)\f$
* time, but requires \f$O(C\cdot n)\f$ auxiliary storage
* space to store a dense copies of the block rows.
*
* This routine assumes the CSR matrix uses full storage,
* but otherwise is flexible with regard to the following
* variations:
* - Column indices do not have to be sorted.
*
* \param[in] ptr CSR row pointers.
* \param[in] ind CSR column indices.
* \param[in] m Logical number of matrix rows
* \param[in] n Logical number of matrix columns
* \param[in] r Desired row block size
* \param[in] B Maximum desired column block size (\f$C\f$).
* examining each block row.
* \param[in] prob_examine the probability a block gets examined.
* \param[in,out] p_nnz_est Used to return the number of
* non-zeros actually examined. Must be non-NULL.
* \param[in, out] p_nb_est Used to return the number of
* \f$r\times c\f$ blocks that would be created for the
* non-zeros examined. Must be non-NULL array of length
* \f$C = \f$ B.
*
* \returns On success, returns 0, sets *p_nnz_est
* to the number of non-zeros examined, and sets p_nb_est[c-1]
* to the number of non-zero blocks that are needed to
* store the examined non-zeros in \f$r \times c\f$ format.
* On error, returns an error code and leaves p_bptr, p_bind,
* and p_bval unchanged.
*/
static int EstimateBlockCounts(const size_t * ptr, const size_t * ind,
    size_t m, size_t n,
    size_t r, size_t B,
    double prob_examine, size_t * p_nnz_est, size_t * p_nb_est)
{
    assert(p_nnz_est != NULL);
    assert(p_nb_est != NULL);
    /* p_nb_est is zeroed here, but *p_nnz_est is only ACCUMULATED into below:
     * the caller is expected to zero-initialize it (estimate_fill does). */
    memset(p_nb_est, 0, sizeof(size_t) * B);
    if (n == 0) {
        *p_nnz_est = 0;
        return 0; /* Quick return */
    }
    /* Pre-draw one uniform sample per full block-row so that the sampling
     * decision is deterministic across OpenMP threads.
     * NOTE(review): this is a VLA of m/r doubles on the stack; for very tall
     * matrices this risks a stack overflow -- consider heap allocation. */
    double rands[m/r];
    for(int i = 0; i < m/r; i++){
        rands[i] = random_uniform();
    }
    #pragma omp parallel firstprivate(m, n, r, B, prob_examine)
    {
        /* per-thread block counts, merged atomically into p_nb_est at the end */
        size_t my_p_nb_est[B];
        /* number of full block-rows */
        size_t M;
        /* number of non-zeros examined by this thread */
        size_t num_nonzeros;
        /* auxiliary storage: reused for each block-row.
         * NOTE(review): this is a per-thread VLA of B*n words on the stack;
         * for large n this risks a stack overflow -- consider heap allocation. */
        size_t block_count[B * n]; /* size N */
        for(size_t i = 0; i < B * n; i++){
            block_count[i] = 0;
        }
        size_t I; /* block-row iteration variable */
        M = m / r; /* # of full block-rows */
        /*
         * ---------------------------------------------------- Allocate
         * temporary space.
         */
        /** Get the block count for block column size c, block column J. */
        #define GET_BC(A, c, J) (A)[((c)-1)*n + (J)]
        /** Increment the block count for block column size c, block column J. */
        #define INC_BC(A, c, J) (A)[((c)-1)*n + (J)]++
        /** Set the block count for block column size c, block column J, to zero. */
        #define ZERO_BC(A, c, J) (A)[((c)-1)*n + (J)] = 0
        /*
         * ---------------------------------------------------- Phase I:
         * Count the number of new blocks to create.
         */
        for(size_t i = 0; i < B; i++){
            my_p_nb_est[i] = 0;
        }
        num_nonzeros = 0;
        #pragma omp for schedule(dynamic)
        for (I = 0; I < M; I++) { /* loop over block rows */
            size_t i;
            size_t di;
            /* Sample block rows: each block row is examined with probability
             * prob_examine, using the pre-drawn uniform deviates. */
            if (rands[I] > prob_examine){
                continue; /* skip this block row */
            }else{
                /*
                 * Count the number of blocks within block-row I, and
                 * remember in 'block_count' which of the possible blocks
                 * have been 'visited' (i.e., contain at least 1 non-zero).
                 */
                for (i = I * r, di = 0; di < r; di++, i++) {
                    size_t k;
                    /*
                     * Invariant: block_count[J] == # of non-zeros
                     * encountered in rows I*r .. I*r+di that should be
                     * stored in column-block J (i.e., that have column
                     * indices J*c <= j < (J+1)*c).
                     */
                    /*
                     * Count the number of additional logical blocks
                     * needed to store non-zeros in row i, and mark the
                     * blocks in block row I that have been visited.
                     */
                    for (k = ptr[i]; k < ptr[i + 1]; k++) {
                        size_t j = ind[k]; /* column index */
                        size_t c;
                        /* a single pass counts blocks for EVERY column
                         * block size 1..B simultaneously */
                        for (c = 1; c <= B; c++) {
                            size_t J = j / c; /* block column index */
                            if (GET_BC(block_count, c, J) == 0) {
                                /* "create" (count) new block */
                                INC_BC(block_count, c, J);
                                my_p_nb_est[c - 1]++;
                            }
                        }
                    }
                }
            }
            /* i == I*r + r here, so this adds the non-zeros of block-row I */
            num_nonzeros += ptr[i] - ptr[I * r];
            /* POST: num_nonzeros == total # of non-zeros examined so far */
            /* POST: num_blocks == total # of new blocks in rows 0..i */
            /*
             * POST: block_count[c,J] == # of non-zeros in block J of
             * block-row I
             */
            /*
             * Reset block_count for next iteration, I+1. This second
             * loop is needed to keep the complexity of phase I to
             * O(nnz).
             */
            for (i = I * r, di = 0; di < r; di++, i++) {
                size_t k;
                for (k = ptr[i]; k < ptr[i + 1]; k++) {
                    size_t j = ind[k]; /* column index */
                    size_t c;
                    for (c = 1; c <= B; c++) {
                        size_t J = j / c; /* block column index */
                        ZERO_BC(block_count, c, J);
                    }
                }
            }
        }
        /* POST: num_blocks == total # of blocks in examined rows. */
        /* POST: num_nonzeros == total # of non-zeros in examined rows. */
        /* Merge the per-thread tallies into the shared outputs. */
        {
            #pragma omp atomic
            *p_nnz_est += num_nonzeros;
            for(int foo = 0; foo < B; foo++){
                #pragma omp atomic
                p_nb_est[foo] += my_p_nb_est[foo];
            }
        }
    }
    return 0;
}
/* Identifier reported by this fill-estimation implementation. */
char * name() {
    static char impl_name[] = "oski";
    return impl_name;
}
/**
* Given an m by n CSR matrix A, estimates the fill ratio if the matrix were
* converted into b_r by b_c BCSR format. The fill ratio is b_r times b_c times
* the number of nonzero blocks in the BCSR format divided by the number of
* nonzeros. All estimates should be accurate to relative error epsilon with
* probability at least (1 - delta).
*
* The caller supplies this routine with a maximum row and column block size B,
* and this routine returns the estimated fill ratios for all
* 1 <= b_r, b_c <= B.
*
* This routine assumes the CSR matrix uses full storage, and assumes that
* column indicies are sorted.
*
* \param[in] m Logical number of matrix rows
* \param[in] n Logical number of matrix columns
* \param[in] nnz Logical number of matrix nonzeros
* \param[in] *ptr CSR row pointers.
* \param[in] *ind CSR column indices.
* \param[in] B Maximum desired block size
* \param[in] epsilon Epsilon
* \param[in] delta Delta
* \param[out] *fill Fill ratios for all specified b_r, b_c in order
* \param[in] verbose 0 if you should be quiet
*
* Note that the fill ratios should be stored according to the following order:
* size_t i = 0;
* for (size_t b_r = 1; b_r <= B; b_r++) {
* for (size_t b_c = 1; b_c <= B; b_c++) {
* //fill[i] = fill for b_r, b_c, o_r, o_c
* i++;
* }
* }
*
* \returns On success, returns 0. On error, returns an error code.
*/
/* Estimates the BCSR fill ratio for every block size 1 <= b_r, b_c <= B of an
 * m x n CSR matrix, storing the results in fill[] in b_r-major order.
 * For each row block size the non-zeros are sampled by EstimateBlockCounts.
 * Returns 0 on success, -1 on allocation failure, or the error code forwarded
 * from EstimateBlockCounts. */
int estimate_fill (size_t m,
   size_t n,
   size_t nnz,
   const size_t *ptr,
   const size_t *ind,
   size_t B,
   double epsilon,
   double delta,
   double *fill,
   int verbose){
    (void)epsilon;   /* not used by this sampling scheme */
    (void)verbose;   /* this implementation is always quiet */
    size_t *nb_est = malloc(sizeof *nb_est * B);
    if (nb_est == NULL)
        return -1;
    size_t out = 0;  /* next slot in fill[]: b_r-major, then b_c */
    for (size_t r = 1; r <= B; r++) {
        size_t nnz_est = 0;  /* EstimateBlockCounts accumulates into this */
        memset(nb_est, 0, sizeof *nb_est * B);
        /* NOTE(review): delta is forwarded as the examination probability;
         * confirm this sampling rate meets the (epsilon, delta) accuracy
         * contract stated in the header comment. */
        int err = EstimateBlockCounts(ptr, ind,
            m, n, r, B, delta, &nnz_est, nb_est);
        if (err) {
            free(nb_est);
            return err;
        }
        for (size_t c = 1; c <= B; c++) {
            /* storage needed if the sampled non-zeros were stored as r x c blocks */
            size_t nb_nnz = nb_est[c - 1] * r * c;
            double ratio;
            if (nnz_est == 0)
                /* No non-zeros sampled: fill is 1.0, unless blocks were somehow
                 * counted, in which case report infinite fill. INFINITY is used
                 * instead of 1.0/0.0, which is undefined behavior in C
                 * implementations without IEEE (Annex F) semantics. */
                ratio = nb_nnz ? INFINITY : 1.0;
            else
                ratio = (double)nb_nnz / nnz_est;
            fill[out++] = ratio;
        }
    }
    free(nb_est);
    return 0;
}
|
inner_product.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the inner product of two vector fields is computed.
*/
#include <stdio.h>
#include "../game_types.h"
int inner_product(Vector_field in_field_0, Vector_field in_field_1, Scalar_field out_field, Grid *grid)
{
    /*
    Computes the inner product of the vector fields in_field_0 and in_field_1
    at every scalar point. This is needed for computing the dissipation due to
    momentum diffusion (friction). Always returns 0.
    */
    #pragma omp parallel for
    for (int h_index = 0; h_index < NO_OF_SCALARS_H; ++h_index)
    {
        /* pentagonal cells have five horizontal edges, all others six */
        int no_of_edges = (h_index < NO_OF_PENTAGONS) ? 5 : 6;
        for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
        {
            int i = layer_index*NO_OF_SCALARS_H + h_index;
            int base_index = 8*i;
            out_field[i] = 0;
            /* horizontal contributions from the adjacent edges */
            for (int j = 0; j < no_of_edges; ++j)
            {
                int vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + j];
                out_field[i] += grid -> inner_product_weights[base_index + j]*in_field_0[vector_index]*in_field_1[vector_index];
            }
            /* vertical contributions from the upper and lower cell interfaces */
            int upper_index = h_index + layer_index*NO_OF_VECTORS_PER_LAYER;
            int lower_index = h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER;
            out_field[i] += grid -> inner_product_weights[base_index + 6]*in_field_0[upper_index]*in_field_1[upper_index];
            out_field[i] += grid -> inner_product_weights[base_index + 7]*in_field_0[lower_index]*in_field_1[lower_index];
        }
    }
    return 0;
}
|
FullyDistSpVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.2 -------------------------------------------------*/
/* date: 10/06/2011 --------------------------------------------*/
/* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/
/****************************************************************/
/*
Copyright (c) 2011, Aydin Buluc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_SP_VEC_H_
#define _FULLY_DIST_SP_VEC_H_
#include <iostream>
#include <vector>
#include <utility>
#include <unordered_set>
#include "CommGrid.h"
#include "promote.h"
#include "SpParMat.h"
#include "FullyDist.h"
#include "Exception.h"
#include "OptBuf.h"
#include "CombBLAS.h"
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class FullyDistVec;
template <class IU, class NU>
class SparseVectorLocalIterator;
/**
* A sparse vector of length n (with nnz <= n of them being nonzeros) is distributed to
* "all the processors" in a way that "respects ordering" of the nonzero indices
* Example: x = [5,1,6,2,9] for nnz(x)=5 and length(x)=12
* we use 4 processors P_00, P_01, P_10, P_11
* Then P_00 owns [1,2] (in the range [0,...,2]), P_01 owns [5] (in the range [3,...,5]), and so on.
* In the case of A(v,w) type sparse matrix indexing, this doesn't matter because n = nnz
* After all, A(v,w) will have dimensions length(v) x length (w)
* v and w will be of numerical type (NT) "int" and their indices (IT) will be consecutive integers
* It is possible that nonzero counts are distributed unevenly
* Example: x=[1,2,3,4,5] and length(x) = 20, then P_00 would own all the nonzeros and the rest will hold empty vectors
* Just like in SpParMat case, indices are local to processors (they belong to range [0,...,length-1] on each processor)
* \warning Always create vectors with the right length, setting elements won't increase its length (similar to operator[] on std::vector)
**/
template <class IT, class NT>
class FullyDistSpVec: public FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>
{
public:
// Constructors: empty vector, vector of global length glen, and variants
// that reuse an existing communication grid.
FullyDistSpVec ( );
FullyDistSpVec ( IT glen );
FullyDistSpVec ( shared_ptr<CommGrid> grid);
FullyDistSpVec ( shared_ptr<CommGrid> grid, IT glen);
FullyDistSpVec (const FullyDistVec<IT,NT> & rhs); // Conversion copy-constructor
//! like operator=, but instead of making a deep copy it just steals the contents.
//! Useful for places where the "victim" will be destroyed immediately after the call.
void stealFrom(FullyDistSpVec<IT,NT> & victim);
FullyDistSpVec<IT,NT> & operator=(const FullyDistSpVec< IT,NT > & rhs);
FullyDistSpVec<IT,NT> & operator=(const FullyDistVec< IT,NT > & rhs); // convert from dense
FullyDistSpVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistSpVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
// Default handler used by ReadDistribute / SaveGathered when the file
// stores plain scalar values (or no values at all: getNoNum yields 1).
class ScalarReadSaveHandler
{
public:
NT getNoNum(IT index) { return static_cast<NT>(1); }
template <typename c, typename t>
NT read(std::basic_istream<c,t>& is, IT index)
{
NT v;
is >> v;
return v;
}
template <typename c, typename t>
void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
{
os << v;
}
};
// Read the vector from infile; rank 'master' performs the file I/O.
template <class HANDLER>
ifstream& ReadDistribute (ifstream& infile, int master, HANDLER handler);
ifstream& ReadDistribute (ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler()); }
template <typename NNT> operator FullyDistSpVec< IT,NNT > () const //!< Type conversion operator
{
FullyDistSpVec<IT,NNT> CVT(commGrid);
CVT.ind = vector<IT>(ind.begin(), ind.end());
CVT.num = vector<NNT>(num.begin(), num.end());
CVT.glen = glen;
return CVT;
}
// Equality is checked by converting both sides to dense vectors and
// comparing those; like the conversions, this is a collective operation.
bool operator==(const FullyDistSpVec<IT,NT> & rhs) const
{
FullyDistVec<IT,NT> v = *this;
FullyDistVec<IT,NT> w = rhs;
return (v == w);
}
void PrintInfo(string vecname) const;
void iota(IT globalsize, NT first);
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //!< SpRef (expects ri to be 0-based)
void SetElement (IT indx, NT numx); // element-wise assignment
void DelElement (IT indx); // element-wise deletion
/**
* @brief Remove elements in index from the set
*/
template <typename E>
void removeFromHash(std::unordered_set<E> &localUnvisitedVertices)
{
for(auto e: ind)
{
localUnvisitedVertices.erase(e);
}
}
NT operator[](IT indx);
bool WasFound() const { return wasFound; }
// sort the vector itself
// return the permutation vector (0-based)
FullyDistSpVec<IT, IT> sort();
// local nonzero count (no communication)
IT getlocnnz() const
{
return ind.size();
}
// global nonzero count (collective: MPI_Allreduce over the whole grid)
IT getnnz() const
{
IT totnnz = 0;
IT locnnz = ind.size();
MPI_Allreduce( &locnnz, &totnnz, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
return totnnz;
}
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyLocLength;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyRowLength;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::RowLenUntil;
// set each nonzero's value to its global index: num[i] = ind[i] + offset
void setNumToInd()
{
IT offset = LengthUntil();
IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i< spsize; ++i)
num[i] = ind[i] + offset;
}
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
// apply __unary_op to every stored value, in place
template <typename _UnaryOperation>
void Apply(_UnaryOperation __unary_op)
{
transform(num.begin(), num.end(), num.begin(), __unary_op);
}
// apply __binary_op(value, global_index) to every stored value, in place
template <typename _BinaryOperation>
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < spsize; ++i)
num[i] = __binary_op(num[i], ind[i] + offset);
}
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT init);
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op);
void DebugPrint();
shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
void Reset();
NT GetLocalElement(IT indx);
void BulkSet(IT inds[], int count);
protected:
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::commGrid;
private:
vector< IT > ind; // ind.size() gives the number of nonzeros
vector< NT > num; // num[i] is the value stored at local index ind[i]
bool wasFound; // true if the last GetElement operation returned an actual value.
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class SparseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x );
template <typename SR, typename IU, typename NUM, typename UDER>
friend FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue);
template <typename VT, typename IU, typename UDER> // NoSR version (in BFSFriends.h)
friend FullyDistSpVec<IU,VT> SpMV (const SpParMat<IU,bool,UDER> & A, const FullyDistSpVec<IU,VT> & x, OptBuf<int32_t, VT > & optbuf);
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf);
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp);
template <typename IU>
friend void RandPerm(FullyDistSpVec<IU,IU> & V); // called on an existing object, randomly permutes it
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
//! Helper functions for sparse matrix X sparse vector
template <typename SR, typename IU, typename OVT>
friend void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs);
template <typename IU, typename VT>
friend void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs);
template<typename IU, typename NV>
friend void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue);
};
#include "FullyDistSpVec.cpp"
#endif
|
GB_binop__times_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_int64
// A.*B function (eWiseMult): GB_AemultB__times_int64
// A*D function (colscale): GB_AxD__times_int64
// D*A function (rowscale): GB_DxB__times_int64
// C+=B function (dense accum): GB_Cdense_accumB__times_int64
// C+=b function (dense accum): GB_Cdense_accumb__times_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int64
// C=scalar+B GB_bind1st__times_int64
// C=scalar+B' GB_bind1st_tran__times_int64
// C=A+scalar GB_bind2nd__times_int64
// C=A'+scalar GB_bind2nd_tran__times_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x * y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, all three matrices dense; the entire body is the shared template.
// Returns nothing: this variant has no disabled/error path.
void GB_Cdense_ewise3_accum__times_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense, no accumulator.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__times_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// kfirst/klast/pstart slices partition B's entries across ntasks tasks.
GrB_Info GB_Cdense_accumB__times_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points to the scalar, passed as untyped GB_void and cast to int64_t.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out (GB_DISABLE).
// Fix: the original had a redundant "return (GrB_SUCCESS) ;" inside the inner
// scope, making the outer return unreachable; the single return now sits after
// the scope, matching the sibling GB_Cdense_accumB method.
GrB_Info GB_Cdense_accumb__times_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__times_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed pointer to the values of C, written by the template
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__times_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed pointer to the values of C, written by the template
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B.
// The three slice workspaces start NULL; whatever the template allocates is
// released by GB_FREE_ALL (defined just above via GB_ek_slice_free).
GrB_Info GB_AaddB__times_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B.
// Same slice-workspace protocol as GB_AaddB above: start NULL, freed by
// GB_FREE_ALL after the template runs.
GrB_Info GB_AemultB__times_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p], for every entry p present in the bitmap Bb.
// Cx and Bx may be aliased; Bb == NULL means all entries are present.
GrB_Info GB_bind1st__times_int64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int64_t scalar = (*((int64_t *) x_input)) ;
    int64_t *out = (int64_t *) Cx_output ;
    int64_t *in = (int64_t *) Bx_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap are left untouched
        if (!GBB (Bb, k)) continue ;
        out [k] = (scalar * in [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y, for every entry p present in the bitmap Ab.
// Cx and Ax may be aliased; Ab == NULL means all entries are present.
GrB_Info GB_bind2nd__times_int64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int64_t scalar = (*((int64_t *) y_input)) ;
    int64_t *out = (int64_t *) Cx_output ;
    int64_t *in = (int64_t *) Ax_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // entries absent from the bitmap are left untouched
        if (!GBB (Ab, k)) continue ;
        out [k] = (in [k] * scalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply the binary operator with x bound first.
// Note the #undef/#define pairs: GB_ATYPE is temporarily redefined for the
// template and restored afterward (the restore after #endif is preprocessor
// text, not executable code, so its position after the returns is fine).
GrB_Info GB_bind1st_tran__times_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply the binary operator with y bound second.
// The per-entry work is done by GB_CAST_OP (redefined just above).
GrB_Info GB_bind2nd_tran__times_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2013 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* (Instances are passed around by value; see C_EXPR_APPEND below.)  */
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
};
/* Type alias for struct c_expr. This allows to use the structure
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A varray of c_expr_t. */
/* Append a new c_expr_t element to V. */
#define C_EXPR_APPEND(V, ELEM) \
do { \
c_expr_t __elem = (ELEM); \
vec_safe_push (V, __elem); \
} while (0)
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspecs. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier. */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int128,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_fract,
cts_accum
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
/* The words below correspond one-to-one to the same-named _p
bitfields in struct c_declspecs.  */
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
/* Source locations of the specifier words, indexed by enum
c_declspec_word.  */
source_location locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
/* A (name, type) pair for one tag (struct, union or enum) defined
   within a function declarator's parameter list.  */
typedef struct c_arg_tag_d {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
Conv2DRef.h | // Copyright 2019 MAI. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "include/Type.h"
namespace MAI {
namespace Op {
namespace CPU {
namespace Ref {
// Reference (naive) 2-D convolution kernel, specialized per tensor layout.
// T: element type; INPUT_FORMAT: layout of the input/output tensors;
// FILTER_FORMAT: layout of the filter tensor.  The primary template is only
// declared; concrete behavior lives in the partial specializations below.
template<typename T, DataFormat INPUT_FORMAT, DataFormat FILTER_FORMAT>
struct Conv2D {
static void conv2d(const T* input,
const std::vector<shape_t>& inputShape,
const T* filter,
const std::vector<shape_t>& filterShape,
const T* bias,
const std::vector<shape_t>& biasShape,
const Conv2DParam* param,
T* output,
const std::vector<shape_t>& outputShape);
};
// Partial specialization: NCHW input/output with OIHW filter layout.
// Naive grouped 2-D convolution; accumulates into `output`, which is
// presumably zero-initialized by the caller — TODO confirm.
template<typename T>
struct Conv2D<T, NCHW, OIHW> {
static void conv2d(const T* input,
const std::vector<shape_t>& inputShape,
const T* filter,
const std::vector<shape_t>& filterShape,
const T* bias,
const std::vector<shape_t>& biasShape,
const Conv2DParam* param,
T* output,
const std::vector<shape_t>& outputShape) {
// Channels per group on the output/input side (dim 1 is C in NCHW).
int32 outputGroupChannelSize = outputShape[1] / param->group;
int32 inputGroupChannelSize = inputShape[1] / param->group;
#pragma omp parallel for collapse(2)
for(shape_t n = 0; n < outputShape[0]; ++n) {
for(shape_t o = 0; o < outputShape[1]; ++o) {
for(shape_t h = 0; h < outputShape[2]; ++h) {
for(shape_t w = 0; w < outputShape[3]; ++w) {
T* outputV = output + offset4D(outputShape, n, o, h, w);
// Top-left corner of the receptive field in input coordinates
// (paddings[0]/paddings[2] appear to be the top/left pads — verify).
shape_t inHBase = h * param->strides[DataFormatIndex<NCHW>::H] - param->paddings[0];
shape_t inWBase = w * param->strides[DataFormatIndex<NCHW>::W] - param->paddings[2];
int32 group = o / outputGroupChannelSize;// The group th of output channel
// Only input channels belonging to this output channel's group contribute.
for(shape_t i = group * inputGroupChannelSize; i < (group + 1) * inputGroupChannelSize; ++i) {
for(shape_t fh = 0; fh < filterShape[DataFormatIndex<OIHW>::H]; ++fh) {
for(shape_t fw = 0; fw < filterShape[DataFormatIndex<OIHW>::W]; ++fw) {
shape_t inHOffset = inHBase + fh;
shape_t inWOffset = inWBase + fw;
// NOTE(review): the >= 0 checks only exclude padded positions if
// shape_t is a signed type — confirm shape_t's signedness.
if (inHOffset >= 0 && inHOffset < inputShape[DataFormatIndex<NCHW>::H]
&& inWOffset >= 0 && inWOffset < inputShape[DataFormatIndex<NCHW>::W]) {
shape_t inputOffset = offset4D(inputShape, n, i, inHOffset, inWOffset);
shape_t filterOffset = offset4D(filterShape, o, i % inputGroupChannelSize, fh, fw);
const T* inputV = input + inputOffset;
const T* filterV = filter + filterOffset;
*outputV += (*inputV) * (*filterV);
}
}
}
}
if (bias != NULL) {
*outputV += *(bias + o);
}
}
}
}
}
}
};
template<typename T>
struct Conv2D<T, NHWC, HWIO> {
static void conv2d(const T* input,
const std::vector<shape_t>& inputShape,
const T* filter,
const std::vector<shape_t>& filterShape,
const T* bias,
const std::vector<shape_t>& biasShape,
const Conv2DParam* param,
T* output,
const std::vector<shape_t>& outputShape) {
int32 outputGroupChannelSize = outputShape[DataFormatIndex<NHWC>::C] / param->group;
int32 inputGroupChannelSize = inputShape[DataFormatIndex<NHWC>::C] / param->group;
#pragma omp parallel for collapse(4)
for(shape_t n = 0; n < outputShape[0]; ++n) {
for(shape_t h = 0; h < outputShape[1]; ++h) {
for(shape_t w = 0; w < outputShape[2]; ++w) {
for(shape_t o = 0; o < outputShape[3]; ++o) {
T* outputV = output + offset4D(outputShape, n, h, w, o);
shape_t inHBase = h * param->strides[DataFormatIndex<NHWC>::H] - param->paddings[0];
shape_t inWBase = w * param->strides[DataFormatIndex<NHWC>::W] - param->paddings[2];
int32 group = o / outputGroupChannelSize;// The group th of output channel
for(shape_t i = group * inputGroupChannelSize; i < (group + 1) * inputGroupChannelSize; ++i) {
for(shape_t fh = 0; fh < filterShape[DataFormatIndex<HWIO>::H]; ++fh) {
for(shape_t fw = 0; fw < filterShape[DataFormatIndex<HWIO>::H]; ++fw) {
shape_t inHOffset = inHBase + fh;
shape_t inWOffset = inWBase + fw;
if (inHOffset >= 0 && inHOffset < inputShape[DataFormatIndex<NHWC>::H]
&& inWOffset >= 0 && inWOffset < inputShape[DataFormatIndex<NHWC>::W]) {
shape_t inputOffset = offset4D(inputShape, n, inHOffset, inWOffset, i);
shape_t filterOffset = offset4D(filterShape, fh, fw, i % inputGroupChannelSize, o);
const T* inputV = input + inputOffset;
const T* filterV = filter + filterOffset;
*outputV += (*inputV) * (*filterV);
}
}
}
}
if (bias != NULL) {
*outputV += *(bias + o);
}
}
}
}
}
}
};
} // namespace Ref
} // namespace CPU
} // namespace Op
} // namespace MAI
|
privatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// tmp should be put as private to avoid race condition
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i;
/* NOTE: tmp is intentionally shared by the parallel loop below — this
   DataRaceBench "-yes" benchmark exists to exhibit that data race.  Making
   tmp private would remove the race; do NOT "fix" it here. */
int tmp;
int len=100;
/* Array length may be overridden by the first command-line argument. */
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0;i<len;i++)
a[i]=i;
/* Race: each iteration writes the shared tmp and then reads it back, so
   a[i] may observe a value written by a different iteration/thread. */
#pragma omp parallel for
for (i=0;i<len;i++)
{
tmp =a[i]+i;
a[i] = tmp;
}
return 0;
}
|
core_dtrssq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrssq.c, normal z -> d, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// This computation also shows up in plasma_core_dsyssq() and can be factored out.
// LAPACK does real and imag components separately in dlassq.
// Fold one value into a scaled sum of squares (LAPACK dlassq update):
// maintains (*scale, *sumsq) so that the sum of squares of all values seen
// so far equals scale^2 * sumsq, without intermediate overflow/underflow.
static inline void ssq(double value, double *scale, double *sumsq)
{
    double a = fabs(value);
    if (a == 0.0)       // NaN compares false here, so NaN still propagates
        return;
    double s = *scale;
    if (s < a) {
        // New maximum magnitude: rescale the accumulated sum to the new scale.
        double r = s / a;
        *sumsq = 1.0 + *sumsq * (r * r);
        *scale = a;
    }
    else {
        double r = a / s;
        *sumsq += r * r;
    }
}
/******************************************************************************/
// Accumulate the scaled sum of squares of the triangular part of the m-by-n
// tile A (leading dimension lda) into (*scale, *sumsq), in the dlassq sense:
// after the call, the Frobenius norm of the visited entries is
// scale * sqrt(sumsq).  uplo selects the upper or lower triangle; with
// diag == PlasmaUnit the diagonal entries are treated as implicit 1.0.
// Declared weak so an optimized implementation can override it at link time.
__attribute__((weak))
void plasma_core_dtrssq(plasma_enum_t uplo, plasma_enum_t diag,
int m, int n,
const double *A, int lda,
double *scale, double *sumsq)
{
if (uplo == PlasmaUpper) {
if (diag == PlasmaNonUnit) {
// Column j contributes rows 0 .. min(j, m-1).
for (int j = 0; j < n; j++) {
ssq(A[lda*j], scale, sumsq);
for (int i = 1; i < imin(j+1, m); i++) {
ssq(A[lda*j+i], scale, sumsq);
}
}
}
else { // PlasmaUnit
int j;
// Columns that intersect the diagonal: implicit 1.0 for the diagonal
// entry plus the strictly-upper entries above it.
for (j = 0; j < imin(n, m); j++) {
ssq(1.0, scale, sumsq);
for (int i = 0; i < j; i++) {
ssq(A[lda*j+i], scale, sumsq);
}
}
// Columns to the right of the diagonal block are full height.
for (; j < n; j++) {
ssq(A[lda*j], scale, sumsq);
for (int i = 1; i < m; i++) {
ssq(A[lda*j+i], scale, sumsq);
}
}
}
}
else { // PlasmaLower
if (diag == PlasmaNonUnit) {
// Column j contributes the diagonal entry and everything below it.
for (int j = 0; j < imin(n, m); j++) {
ssq(A[lda*j+j], scale, sumsq);
for (int i = j+1; i < m; i++) {
ssq(A[lda*j+i], scale, sumsq);
}
}
}
else { // PlasmaUnit
// Implicit unit diagonal plus the strictly-lower entries.
for (int j = 0; j < imin(n, m); j++) {
ssq(1.0, scale, sumsq);
for (int i = j+1; i < m; i++) {
ssq(A[lda*j+i], scale, sumsq);
}
}
}
}
}
// OpenMP-task wrapper around plasma_core_dtrssq: initializes the
// (*scale, *sumsq) pair to (0, 1) and runs the triangular sum-of-squares
// reduction inside a task.  No-op if the sequence is already in error.
void plasma_core_omp_dtrssq(plasma_enum_t uplo, plasma_enum_t diag,
int m, int n,
const double *A, int lda,
double *scale, double *sumsq,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// NOTE(review): scale and sumsq are written as single scalars below, yet the
// depend clauses declare array sections of length n — confirm whether the
// callers rely on that (it looks like it should be [0:1]).
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:scale[0:n]) \
depend(out:sumsq[0:n])
{
if (sequence->status == PlasmaSuccess) {
*scale = 0.0;
*sumsq = 1.0;
plasma_core_dtrssq(uplo, diag, m, n, A, lda, scale, sumsq);
}
}
}
|
HaloExchangeNeighborTwo.h | #include "mpi.h"
#include <memory>
using std::shared_ptr;
#define U_OUT(i,j) U_OUT[(i)*(n+4)+(j)]
#define U_IN(i,j) U_IN[(i)*n+(j)]
#define GRAD_U_OUT(i,j) GRAD_U_OUT[(i)*(n+4)+(j)]
#define GRAD_U_IN(i,j) GRAD_U_IN[(i)*n+(j)]
#define RANK(I,J) ((I-1)*N+(J-1))
void HaloExchangeNeighborTwo_forward(double *U_OUT, const double *U_IN, double fill_value,
int M, int N, int n, int tag){
int rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Comm_rank( comm , &rank);
int I = rank/N + 1;
int J = rank%N + 1;
#pragma omp parallel for
for(int i = 0; i<n; i++){
for(int j = 0; j<n; j++){
U_OUT(i+2, j+2) = U_IN(i, j);
}
}
shared_ptr<double> iupper_in(new double[2*n]);
shared_ptr<double> ilower_in(new double[2*n]);
shared_ptr<double> jupper_in(new double[2*n]);
shared_ptr<double> jlower_in(new double[2*n]);
shared_ptr<double> iupper(new double[2*n]);
shared_ptr<double> ilower(new double[2*n]);
shared_ptr<double> jupper(new double[2*n]);
shared_ptr<double> jlower(new double[2*n]);
MPI_Request request[8];
MPI_Status status[8];
int n_request = 0;
for(int k = 0; k<n; k++){
iupper_in.get()[k] = U_IN(0, k);
iupper_in.get()[n + k] = U_IN(1, k);
ilower_in.get()[k] = U_IN(n-2, k);
ilower_in.get()[n + k] = U_IN(n-1, k);
jupper_in.get()[k] = U_IN(k, 0);
jupper_in.get()[n + k] = U_IN(k, 1);
jlower_in.get()[k] = U_IN(k, n-2);
jlower_in.get()[n + k] = U_IN(k, n-1);
}
if (I>1){
MPI_Isend( iupper_in.get() , 2*n , MPI_DOUBLE , RANK(I-1, J) , tag , comm , request + n_request);
MPI_Irecv( iupper.get() , 2*n , MPI_DOUBLE , RANK(I-1,J) , tag , comm , request + n_request+1);
n_request += 2;
} else{
for(int i = 0; i< 2*n; i++) iupper.get()[i] = fill_value;
}
if (I<M){
MPI_Isend( ilower_in.get() , 2*n , MPI_DOUBLE , RANK(I+1, J) , tag , comm , request + n_request);
MPI_Irecv( ilower.get() , 2*n , MPI_DOUBLE , RANK(I+1,J) , tag , comm , request + n_request+1);
n_request += 2;
} else{
for(int i = 0; i< 2*n; i++) ilower.get()[i] = fill_value;
}
if (J>1){
MPI_Isend( jupper_in.get() , 2*n , MPI_DOUBLE , RANK(I, J-1) , tag , comm , request + n_request);
MPI_Irecv( jupper.get() , 2*n , MPI_DOUBLE , RANK(I,J-1) , tag , comm , request + n_request+1);
n_request += 2;
}else{
for(int i = 0; i< 2*n; i++) jupper.get()[i] = fill_value;
}
if (J<N){
MPI_Isend( jlower_in.get() , 2*n , MPI_DOUBLE , RANK(I, J+1) , tag , comm , request + n_request);
MPI_Irecv( jlower.get() , 2*n , MPI_DOUBLE , RANK(I,J+1) , tag , comm , request + n_request+1);
n_request += 2;
}else{
for(int i = 0; i< 2*n; i++) jlower.get()[i] = fill_value;
}
if (n_request>0){
MPI_Waitall( n_request , request , status);
}
for(int k = 0; k<n; k++){
U_OUT(0, k+2) = iupper.get()[k];
U_OUT(1, k+2) = iupper.get()[n + k];
U_OUT(k+2, 0) = jupper.get()[k];
U_OUT(k+2, 1) = jupper.get()[n + k];
U_OUT(n+2, k+2) = ilower.get()[k];
U_OUT(n+3, k+2) = ilower.get()[n + k];
U_OUT(k+2, n+2) = jlower.get()[k];
U_OUT(k+2, n+3) = jlower.get()[n + k];
}
}
void HaloExchangeNeighborTwo_backward(
double *GRAD_U_IN,
const double *GRAD_U_OUT,
const double *U_OUT, const double *U_IN, double fill_value,
int M, int N, int n, int tag){
int rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Comm_rank( comm , &rank);
int I = rank/N + 1;
int J = rank%N + 1;
#pragma omp parallel for
for(int i = 0; i<n; i++){
for(int j = 0; j<n; j++){
GRAD_U_IN(i, j) = GRAD_U_OUT(i+2, j+2);
}
}
shared_ptr<double> iupper_in(new double[2*n]);
shared_ptr<double> ilower_in(new double[2*n]);
shared_ptr<double> jupper_in(new double[2*n]);
shared_ptr<double> jlower_in(new double[2*n]);
shared_ptr<double> iupper(new double[2*n]);
shared_ptr<double> ilower(new double[2*n]);
shared_ptr<double> jupper(new double[2*n]);
shared_ptr<double> jlower(new double[2*n]);
MPI_Request request[8];
MPI_Status status[8];
int n_request = 0;
for(int k = 0; k<n; k++){
iupper_in.get()[k] = GRAD_U_OUT(0, k+2);
iupper_in.get()[n + k] = GRAD_U_OUT(1, k+2);
ilower_in.get()[k] = GRAD_U_OUT(n-2+4, k+2);
ilower_in.get()[n + k] = GRAD_U_OUT(n-1+4, k+2);
jupper_in.get()[k] = GRAD_U_OUT(k+2, 0);
jupper_in.get()[n + k] = GRAD_U_OUT(k+2, 1);
jlower_in.get()[k] = GRAD_U_OUT(k+2, n-2+4);
jlower_in.get()[n + k] = GRAD_U_OUT(k+2, n-1+4);
}
if (I>1){
MPI_Isend( iupper_in.get() , 2*n , MPI_DOUBLE , RANK(I-1, J) , tag , comm , request + n_request);
MPI_Irecv( iupper.get() , 2*n , MPI_DOUBLE , RANK(I-1,J) , tag , comm , request + n_request+1);
n_request += 2;
} else{
for(int i = 0; i< 2*n; i++) iupper.get()[i] = 0.0;
}
if (I<M){
MPI_Isend( ilower_in.get() , 2*n , MPI_DOUBLE , RANK(I+1, J) , tag , comm , request + n_request);
MPI_Irecv( ilower.get() , 2*n , MPI_DOUBLE , RANK(I+1,J) , tag , comm , request + n_request+1);
n_request += 2;
} else{
for(int i = 0; i< 2*n; i++) ilower.get()[i] = 0.0;
}
if (J>1){
MPI_Isend( jupper_in.get() , 2*n , MPI_DOUBLE , RANK(I, J-1) , tag , comm , request + n_request);
MPI_Irecv( jupper.get() , 2*n , MPI_DOUBLE , RANK(I,J-1) , tag , comm , request + n_request+1);
n_request += 2;
}else{
for(int i = 0; i< 2*n; i++) jupper.get()[i] = 0.0;
}
if (J<N){
MPI_Isend( jlower_in.get() , 2*n , MPI_DOUBLE , RANK(I, J+1) , tag , comm , request + n_request);
MPI_Irecv( jlower.get() , 2*n , MPI_DOUBLE , RANK(I,J+1) , tag , comm , request + n_request+1);
n_request += 2;
}else{
for(int i = 0; i< 2*n; i++) jlower.get()[i] = 0.0;
}
if (n_request>0){
MPI_Waitall( n_request , request , status);
}
for(int k = 0; k<n; k++){
GRAD_U_IN(0, k) += iupper.get()[k];
GRAD_U_IN(1, k) += iupper.get()[n + k];
GRAD_U_IN(k, 0) += jupper.get()[k];
GRAD_U_IN(k, 1) += jupper.get()[n + k];
GRAD_U_IN(n-2, k) += ilower.get()[k];
GRAD_U_IN(n-1, k) += ilower.get()[n + k];
GRAD_U_IN(k, n-2) += jlower.get()[k];
GRAD_U_IN(k, n-1) += jlower.get()[n + k];
}
}
#undef U_OUT
#undef U_IN
#undef GRAD_U_OUT
#undef GRAD_U_IN
#undef RANK |
schedule-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Demo of OpenMP static scheduling: sums a[0..n-1] in a parallel loop whose
   chunk size is taken from the command line. */
int main(int argc, char **argv) {
int i, n = 7,chunk, a[n],suma=0;
if(argc < 2) {
fprintf(stderr,"\nFalta chunk \n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i=0; i<n; i++) a[i] = i;
/* suma enters the region with its sequential value (firstprivate) and the
   value from the sequentially-last iteration is copied out (lastprivate). */
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(static,chunk)
// The schedule clause controls how loop iterations are handed out to threads.
// With static, iterations are assigned round-robin in blocks of `chunk` —
// like dealing cards: each player (thread) receives `chunk` cards per round.
for (i=0; i<n; i++)
{ suma = suma + a[i];
printf(" thread %d suma a[%d] suma=%d \n",
omp_get_thread_num(),i,suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
return(0);
}
|
e3.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
/* Demo of OpenMP work-sharing sections: two sections compute an element-wise
   product and an element-wise sum of a and b, each (potentially) on a
   different thread, with barriers used only to keep the output readable. */
int main ()
{
int i, nthreads, tid, section;
float a[N], b[N], c[N];
void print_results(float array[N], int tid, int section);
/* Some initializations */
for (i=0; i<N; i++)
a[i] = b[i] = i * 1.0;
/* c, i, tid and section are per-thread; nthreads stays shared but is only
   written (and printed) by thread 0. */
#pragma omp parallel shared(a, b) private(c, i,tid,section)
{
tid = omp_get_thread_num(); // return thread id
if (tid == 0)
{
nthreads = omp_get_num_threads(); // returns number of threads
printf("Number of threads = %d\n", nthreads);
}
/*** Use barriers for clean output ***/
#pragma omp barrier
printf("Thread %d starting...\n",tid);
#pragma omp barrier
/* nowait: threads leaving the sections construct do not wait here; the
   explicit barrier below provides the synchronization instead. */
#pragma omp sections nowait
{
#pragma omp section
{
section = 1;
for (i=0; i<N; i++)
c[i] = a[i] * b[i];
print_results(c, tid, section);
}
#pragma omp section
{
section = 2;
for (i=0; i<N; i++)
c[i] = a[i] + b[i];
print_results(c, tid, section);
}
} /* end of sections */
/*** Use barrier for clean output ***/
#pragma omp barrier
printf("Thread %d done and synchronized.\n", tid);
#pragma omp barrier
printf("Thread %d exiting...\n",tid);
} /* end of parallel section */
return 0;
}
/* Print the N results computed by one section, five values per line.
   The whole dump runs inside a critical section so output from different
   threads does not interleave. */
void print_results(float array[N], int tid, int section)
{
  int idx;
  /*** use critical for clean output ***/
  #pragma omp critical
  {
    printf("\nThread %d did section %d. The results are:\n", tid, section);
    for (idx = 0; idx < N; idx++) {
      printf("%e ", array[idx]);
      /* Break the line after every fifth value. */
      if ((idx + 1) % 5 == 0)
        printf("\n");
    }
    printf("\n");
  } /*** end of critical ***/
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/* Return the OS identifier of the current process (Windows). */
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
/* Return the OS identifier of the current process (POSIX). */
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
// Kernel mapped over indptr positions; writes kCSRIndPtrErr to *out when an
// invariant is violated: a negative or decreasing entry, a first entry that
// is not 0, or a last entry that does not equal the number of indices.
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
const nnvm::dim_t end, const nnvm::dim_t idx_size) {
if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
(i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
/*!
 * \brief Kernel body: validate the column indices of row i of a CSR matrix.
 * Sets *out = kCSRIdxErr if any index in the row is out of [0, ncols)
 * or the indices are not strictly ascending within the row.
 * \param i      row being checked (one thread per row)
 * \param out    one-element device flag written on error
 * \param idx    column-index array
 * \param indptr row-pointer array delimiting row i as [indptr[i], indptr[i+1])
 * \param ncols  number of columns of the matrix
 */
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const RType* indptr, const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i+1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 ||
(j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
/*!
 * \brief Kernel body: validate the i-th row index of a row-sparse array.
 * Sets *out = kRSPIdxErr if the index is negative, >= nrows, or not
 * strictly ascending relative to the next index.
 * \param i     index entry checked; caller launches with end+1 threads and
 *              passes end = len-1, so idx[i+1] is only read when in bounds
 * \param out   one-element device flag written on error
 * \param idx   row-index array
 * \param end   last valid position for the ascending-order comparison
 * \param nrows size of the first dimension of the dense shape
 */
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
const nnvm::dim_t end, const nnvm::dim_t nrows) {
if ((i < end && idx[i+1] <= idx[i])
|| idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage)
<< "CheckFormatCSRImpl is for CSRNDArray";
const mxnet::TShape shape = input.shape();
const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
const mxnet::TShape storage_shape = input.storage_shape();
// O(1) structural check: CSR must be 2-D, with 1-D aux/data arrays,
// an indptr of length nrows+1, and one column index per stored value.
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) ||
(idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
// O(N) content check: run device kernels that validate indptr
// monotonicity and per-row column-index ordering, then copy the
// resulting error flag back to err_cpu on the host.
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
// one-element scratch flag allocated on the target device
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1, idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
// O(1) structural check: one row index per stored row.
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
// empty index array: nothing further to validate
if (idx_shape[0] == 0) {
return;
}
// O(N) content check: validate index range and strict ascending order
// on the device, then copy the error flag back to the host.
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
// one-element scratch flag allocated on the target device
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
/*!
 * \brief Dispatch format validation by storage type.
 * Dense arrays need no validation; unknown storage types are fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      break;  // no-op for default (dense) storage
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
/*! \brief returns true if all storage types in `vstorage` equal `stype`;
 *  false for empty input. */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (vstorage.empty()) return false;
  for (const auto& entry : vstorage) {
    if (entry != stype) return false;
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if every array in `ndarrays` has storage type
 *  `stype`; false for empty input. */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (ndarrays.empty()) return false;
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() != stype) return false;
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if any array in `ndarrays` has storage type
 *  `stype`; false for empty input. */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  for (const auto& nd : ndarrays) {
    if (nd.storage_type() == stype) {
      return true;
    }
  }
  return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
const NDArrayStorageType stype) {
if (!ndstypes.empty()) {
for (const auto& ndstype : ndstypes) {
if (ndstype == stype) {
return true;
}
}
}
return false;
}
/*! \brief get string representation of dispatch_mode */
/*! \brief human-readable name of a DispatchMode ("unknown" if unmatched) */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:         return "fcompute";
    case DispatchMode::kFComputeEx:       return "fcompute_ex";
    case DispatchMode::kFComputeFallback: return "fcompute_fallback";
    case DispatchMode::kVariable:         return "variable";
    case DispatchMode::kUndefined:        return "undefined";
  }
  return "unknown";
}
/*! \brief get string representation of storage_type */
/*! \brief human-readable name of a storage-type code ("unknown" if unmatched) */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:   return "default";
    case kCSRStorage:       return "csr";
    case kRowSparseStorage: return "row_sparse";
  }
  return "unknown";
}
/*! \brief get string representation of device type */
/*! \brief human-readable name of a device-type code ("unknown" if unmatched) */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:       return "cpu";
    case Context::kGPU:       return "gpu";
    case Context::kCPUPinned: return "cpu_pinned";
    case Context::kCPUShared: return "cpu_shared";
  }
  return "unknown";
}
/*!
 * \brief Look up attribute `attr_name` in the node's attribute dict,
 *        returning `default_val` when absent.
 *
 * Uses a single lookup: the original did `find` followed by `at`,
 * hashing the key twice.
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  const auto it = attrs.dict.find(attr_name);
  return it == attrs.dict.end() ? default_val : it->second;
}
/*! \brief get string representation of the operator stypes */
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
// operator name plus input storage types
os << "operator = " << attrs.op->name
<< "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
// operator parameters as "key" : value pairs
os << "]\n"
<< "params = {";
for (auto kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
// device the operator was dispatched on
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
/*!
 * \brief Build a diagnostic description of an operator invocation:
 *        name, input/output storage types, parameters and device.
 * \note \p req is accepted for interface compatibility but not reported.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Take the array by const reference: the original lambda copied every
  // NDArray by value just to read its storage type.
  auto to_stype = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), to_stype);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), to_stype);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
/*! \brief log message once per thread. Intended for storage fallback
 *  warning messages. The seen-set is thread-local, so the same message
 *  may appear once per thread. */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  // insert() reports whether the element was newly added, so one hash
  // lookup replaces the original find-then-insert pair.
  if (log_store->insert(message).second) {
    LOG(INFO) << message;
  }
}
/*! \brief log storage fallback event
*/
/*! \brief log storage fallback event
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
// warning can be silenced globally via environment variable
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
// deduplicated so repeated fallbacks of the same op log only once per thread
LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
// extra hints when MKL-DNN is compiled in but disabled/misconfigured
if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// heuristic to determine the number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // Resource-conservative default: two worker threads per GPU,
  // overridable through the environment.
  const int nthreads = dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
  return nthreads;
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Number of "colors" bounds per-GPU parallelism; never exceed the
  // number of worker threads available per GPU.
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the n elements of `a` on top of `start`, using an OpenMP
 *        reduction when OpenMP is enabled (serial otherwise).
 * \return start + a[0] + ... + a[n-1]
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
  #pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Recursive worker for ParallelSort. DO NOT call directly.
 * Ranges shorter than `grainsize` are sorted serially; longer ranges are
 * halved, the left half sorted on a spawned thread, and the two sorted
 * halves merged in place.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread left(ParallelSortHelper<RandomIt, Compare>, first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  left.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Sort [first, last) into the order defined by `comp`. Ranges
 *        larger than the computed grain size are split across threads.
 * \param num_threads requested parallelism; a value of 0 is treated as 1
 *        (the original divided by it unchecked, which was undefined
 *        behavior for num_threads == 0).
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t num = static_cast<size_t>(std::distance(first, last));
  const size_t nthreads = std::max(num_threads, static_cast<size_t>(1));
  size_t grainsize = std::max(num / nthreads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
/*!
 * \brief Sort [first, last) into ascending order using operator<,
 *        delegating to the comparator-taking overload.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Helper for non-array type `T`.
 * Only this primary template defines `SingleObject`, so the scalar
 * MakeUnique overload participates in overload resolution solely for
 * non-array types (SFINAE selection).
 */
template <class T>
struct UniqueIf {
/*!
 * \brief Type of `T`.
 */
using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T`.
 * Only this partial specialization defines `UnknownBound`.
 */
template <class T>
struct UniqueIf<T[]> {
/*!
 * \brief Type of `T`.
 */
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T`.
 * `KnownBound` is void: the matching MakeUnique overload is deleted.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
 * \brief Type of `T`.
 */
using KnownBound = void;
};
}  // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
// Equivalent to C++14 std::make_unique<T>(args...) for non-array T.
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
// Equivalent to C++14 std::make_unique<T[]>(n): value-initialized array.
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
* Constructs an arrays of known bound is disallowed.
*/
// Arrays of known bound are rejected at compile time, mirroring std::make_unique.
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Fetch the FCompType attribute registered for `op` under
 *        `name + "<cpu>"` or `name + "<gpu>"` depending on the context;
 *        returns nullptr if the op has no such attribute.
 * NOTE(review): the static locals bind to the `name` of the FIRST call
 * per FCompType instantiation; callers appear to use a fixed name per
 * attribute type — confirm before reusing with varying names.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
return nullptr;
}
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*/
/*!
 * \brief Largest integer representable in `T` without loss of precision:
 *        the full range for integral types, 2^digits (significand bits)
 *        for floating-point types.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value
             ? std::numeric_limits<T>::max()
             : size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// fp16: 10 fraction bits + implicit bit = 11-bit significand,
// so integers are exact up to 2^11 == size_t(2) << 10.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
return size_t(2) << 10;
}
// bf16 specialization returns 2^15. NOTE(review): bf16 has an 8-bit
// significand, which would make integers exact only up to 2^8; the 2^15
// value is kept as-is — confirm the intended semantics upstream.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
return size_t(2) << 14;
}
// Bit length of a: floor(log2(a)) + 1 for a > 0; note it returns 1 for
// a == 0 as well.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int bits = 1;
  for (size_t v = a >> 1; v != 0; v >>= 1) {
    ++bits;
  }
  return bits;
}
// Bit length of a: floor(log2(a)) + 1 for a > 0; note it returns 1 for
// a == 0 as well.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int bits = 1;
  for (unsigned int v = a >> 1; v != 0; v >>= 1) {
    ++bits;
  }
  return bits;
}
/*!
* \brief Return an NDArray of all zeros.
*/
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
const Context &ctx, const int dtype) {
// NDArray with default storage
if (stype == kDefaultStorage) {
// dense arrays are allocated eagerly and filled with zeros
NDArray ret(shape, ctx, false, dtype);
ret = 0;
return ret;
}
// NDArray with non-default storage. Storage allocation is always delayed.
// (sparse arrays with no allocated storage are implicitly all-zero)
return NDArray(stype, shape, ctx, true, dtype);
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
const Context &ctx, const int dtype,
std::vector<NDArray> *vec) {
// NDArray with default storage
if (stype == kDefaultStorage) {
// construct in place, then zero-fill the dense buffer
vec->emplace_back(shape, ctx, false, dtype);
vec->back() = 0;
} else {
// NDArray with non-default storage. Storage allocation is always delayed.
vec->emplace_back(stype, shape, ctx, true, dtype);
}
}
/*!
* \brief parallelize copy by OpenMP.
*/
/*!
 * \brief Copy `size` elements from src to dst; uses OpenMP threads for
 *        large copies (threshold tunable via MXNET_CPU_PARALLEL_SIZE)
 *        and memcpy otherwise. Regions must not overlap.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
// small copies: plain memcpy beats thread startup overhead
std::memcpy(dst, src, sizeof(DType) * size);
}
}
/*!
 * \brief parallelize add by OpenMP
*/
/*!
 * \brief Element-wise dst[i] += src[i] for `size` elements; uses OpenMP
 *        threads above the MXNET_CPU_PARALLEL_SIZE threshold, a serial
 *        loop otherwise.
 */
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
// In-place conversion of a single legacy shape to numpy semantics;
// idempotent when the shape is already numpy compatible.
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
*shape = mxnet::TShape();  // unknown shape ndim = -1
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
(*shape)[j] = -1;  // unknown dim size = -1
}
}
}
}
// Vector overload: apply the scalar conversion to every shape in place.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief This function converts shapes returned by
* the infer shape functions/pass to the legacy shape definition.
*/
// In-place conversion of a single numpy-semantics shape back to the
// legacy encoding (0 means unknown, for both ndim and dim sizes).
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
if (!mxnet::ndim_is_known(*shape)) {
*shape = mxnet::TShape(0, -1);  // legacy "completely unknown" shape
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if (!mxnet::dim_size_is_known(*shape, j)) {
(*shape)[j] = 0;  // legacy "unknown dim size"
}
}
}
}
// Vector overload: apply the scalar conversion to every shape in place.
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief This function returns the output name of a NodeEntry.
*/
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
// Wrap the entry in a temporary single-output symbol so nnvm's
// naming logic can be reused.
nnvm::Symbol sym;
sym.outputs.push_back(e);
return sym.ListOutputNames()[0];
}
// Return a copy of `src` with negative axis values shifted into
// [0, ndim); aborts via CHECK if any axis is still out of range.
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
// convert negative axes to positive values
const int ndim = src.ndim();
mxnet::TShape axes = src;
for (int i = 0; i < ndim; ++i) {
if (axes[i] < 0) {
axes[i] += ndim;
}
CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
<< axes[i] << " exceeds the range ["
<< 0 << ", " << ndim << ")";
}
return axes;
}
// True when the dtype code is one of the floating-point types.
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
    case mshadow::kFloat16:
      return true;
    default:
      return false;
  }
}
// Return the more precise of two dtype codes. Precedence: any float beats
// any integer; within floats f64 > f32 > f16; within integers
// i64 > i32 > (u8 | i8). Mixed u8/i8 is rejected (handled by
// np_binary_out_infer_type, which promotes that pair to i32).
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2) return type1;
if (is_float(type1) && is_float(type2)) {
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
// float/integer mix: the float side wins
return is_float(type1) ? type1 : type2;
}
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
// Output dtype for a numpy-style binary op: the u8/i8 pair promotes to
// i32 (neither can represent the other); everything else defers to
// get_more_precise_type.
inline int np_binary_out_infer_type(const int type1, const int type2) {
  const bool mixed_i8_u8 =
      (type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8);
  if (mixed_i8_u8) {
    return mshadow::kInt32;
  }
  return get_more_precise_type(type1, type2);
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
GB_unaryop__ainv_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_uint64
// op(A') function: GB_tran__ainv_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -aij to every entry (uint64: well-defined two's-complement
// wrap). Auto-generated; the per-entry work is defined by GB_CAST_OP above.
GrB_Info GB_unop__ainv_uint64_uint64
(
uint64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;  // Cx [p] = -Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A': transpose, typecast, and negate in one pass. The traversal
// itself lives in the shared template GB_unaryop_transpose.c, driven by
// the GB_* macros defined above. Auto-generated; do not edit.
GrB_Info GB_tran__ainv_uint64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
move.h | #pragma once
#include "core.h"
#include "energy.h"
#include "average.h"
//#include "analysis.h"
#include "potentials.h"
#include "mpi.h"
namespace Faunus {
namespace Move {
/*!
 * \brief Base class for Monte Carlo moves.
 *
 * Public move()/accept()/reject() presumably wrap the private virtual
 * hooks (_move/_accept/_reject) and maintain the counters below
 * (implementations live outside this header — confirm there).
 */
class Movebase {
private:
virtual void _move(Change&)=0; //!< Perform move and modify change object
virtual void _accept(Change&); //!< Call after move is accepted
virtual void _reject(Change&); //!< Call after move is rejected
virtual void _to_json(json &j) const=0; //!< Extra info for report if needed
virtual void _from_json(const json &j)=0; //!< Extra info for report if needed
TimeRelativeOfTotal<std::chrono::microseconds> timer; //!< Timer for this move (relative to total, per type name)
protected:
unsigned long cnt=0; //!< Attempt counter
unsigned long accepted=0; //!< Accepted-attempt counter
unsigned long rejected=0; //!< Rejected-attempt counter
public:
static Random slump; //!< Shared for all moves
std::string name; //!< Name of move
std::string cite; //!< Reference
int repeat=1; //!< How many times the move should be repeated per sweep
void from_json(const json &j); //!< Configure from json (also calls _from_json)
void to_json(json &j) const; //!< JSON report w. statistics, output etc.
void move(Change &change); //!< Perform move and modify given change object
void accept(Change &c);
void reject(Change &c);
virtual double bias(Change &c, double uold, double unew); //!< adds extra energy change not captured by the Hamiltonian
};
void from_json(const json &j, Movebase &m); //!< Configure any move via json
void to_json(json &j, const Movebase &m);
/**
* @brief Swap the charge of a single atom
*/
template<typename Tspace>
class AtomicSwapCharge : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
Tspace& spc; // Space to operate on
int molid=-1;
double ln10 = log(10);
double pKa, pH;
Average<double> msqd; // mean squared displacement
double _sqd, _bias; // squared displament
std::string molname; // name of molecule to operate on
Change::data cdata;
void _to_json(json &j) const override {
j = {
{"pH", pH},
{"pka", pKa},
{"molid", molid},
{u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
{"molecule", molname}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
pH = j.at("pH").get<double>();
pKa = j.at("pKa").get<double>();
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
if (repeat>0)
repeat = repeat * v.front().size(); // ...and for each atom
}
}
catch (std::exception &e) {
std::cerr << name << ": " << e.what();
throw;
}
} //!< Configure via json object
typename Tpvec::iterator randomAtom() {
assert(molid>=0);
auto mollist = spc.findMolecules( molid ); // all `molid` groups
if (size(mollist)>0) {
auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
if (!git->empty()) {
auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
return p;
}
}
return spc.p.end();
}
void _move(Change &change) override {
auto p = randomAtom();
if (p!=spc.p.end()) {
auto& g = spc.groups[cdata.index];
double oldcharge = p->charge;
p->charge = fabs(oldcharge - 1);
_sqd = fabs(oldcharge - 1) - oldcharge;
change.groups.push_back( cdata ); // add to list of moved groups
_bias = _sqd*(pH-pKa)*ln10; // one may add bias here...
}
}
double bias(Change &change, double uold, double unew) override {
return _bias;
} //!< adds extra energy change not captured by the Hamiltonian
void _accept(Change &change) override { msqd += _sqd; }
void _reject(Change &change) override { msqd += 0; }
public:
AtomicSwapCharge(Tspace &spc) : spc(spc) {
name = "swapcharge";
repeat = -1; // meaning repeat N times
cdata.atoms.resize(1);
cdata.internal=true;
}
};
/**
* @brief Translate and rotate a molecular group
*/
template<typename Tspace>
class AtomicTranslateRotate : public Movebase {
private:
    typedef typename Tspace::Tpvec Tpvec;
    typedef typename Tspace::Tparticle Tparticle;
    Tspace& spc;          // Space to operate on
    int molid=-1;         // id of molecule type to operate on
    Point dir={1,1,1};    // directions in which translation is allowed
    Average<double> msqd; // mean squared displacement
    double _sqd=0;        // squared displacement of the last attempted move
    std::string molname;  // name of molecule to operate on
    Change::data cdata;   // reusable change record (one atom, internal)

    //! Report move parameters and statistics as json
    void _to_json(json &j) const override {
        j = {
            {"dir", dir},
            {"molid", molid},
            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
            {"molecule", molname}
        };
        _roundjson(j,3);
    }

    //! Configure via json object; rethrows with the error printed to stderr
    void _from_json(const json &j) override {
        assert(!molecules<Tpvec>.empty());
        try {
            molname = j.at("molecule");
            auto it = findName(molecules<Tpvec>, molname);
            if (it == molecules<Tpvec>.end())
                throw std::runtime_error("unknown molecule '" + molname + "'");
            molid = it->id();
            dir = j.value("dir", Point(1,1,1));
            if (repeat<0) {
                auto v = spc.findMolecules(molid, Tspace::ALL );
                repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
                if (repeat>0)
                    repeat = repeat * v.front().size(); // ...and for each atom
            }
        }
        catch (std::exception &e) {
            std::cerr << name << ": " << e.what();
            throw;
        }
    } //!< Configure via json object

    //! Pick a random atom from a random `molid` group; records group/atom
    //! indices in `cdata`. Returns `spc.p.end()` if none found.
    typename Tpvec::iterator randomAtom() {
        assert(molid>=0);
        auto mollist = spc.findMolecules( molid, Tspace::ALL ); // all `molid` groups
        if (size(mollist)>0) {
            auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
            if (!git->empty()) {
                auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
                cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
                return p;
            }
        }
        return spc.p.end();
    }

    //! Translate and/or rotate a single randomly picked atom; displacement
    //! parameters `dp`/`dprot` come from the atom's type definition
    void _move(Change &change) override {
        _sqd = 0; // reset: a rotation-only or failed attempt must not reuse a stale value
        auto p = randomAtom();
        if (p!=spc.p.end()) {
            double dp = atoms<Tparticle>.at(p->id).dp;       // translational displacement parameter
            double dprot = atoms<Tparticle>.at(p->id).dprot; // rotational displacement parameter
            auto& g = spc.groups[cdata.index];
            if (dp>0) { // translate
                Point oldpos = p->pos;
                p->pos += 0.5 * dp * ranunit(slump).cwiseProduct(dir);
                spc.geo.boundaryFunc(p->pos);
                _sqd = spc.geo.sqdist(oldpos, p->pos); // squared displacement
                if (!g.atomic) // molecular group -> mass center must follow the atom
                    g.cm = Geometry::massCenter(g.begin(), g.end(), spc.geo.boundaryFunc, -g.cm);
            }
            if (dprot>0) { // rotate about a random axis
                Point u = ranunit(slump);
                double angle = dprot * (slump()-0.5);
                Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
                p->rotate(Q, Q.toRotationMatrix());
            }
            if (dp>0 || dprot>0)
                change.groups.push_back( cdata ); // add to list of moved groups
        }
        else
            std::cerr << name << ": no atoms found" << std::endl;
    }
    void _accept(Change &change) override { msqd += _sqd; } // sample squared displacement
    void _reject(Change &change) override { msqd += 0; }    // rejected attempts sample zero
public:
    AtomicTranslateRotate(Tspace &spc) : spc(spc) {
        name = "transrot";
        repeat = -1; // meaning repeat N times
        cdata.atoms.resize(1); // exactly one atom per attempt
        cdata.internal=true;   // change is internal to the group
    }
};
/**
* @brief Translate and rotate a molecular group
*/
template<typename Tspace>
class TranslateRotate : public Movebase {
protected:
    typedef typename Tspace::Tpvec Tpvec;
    Tspace& spc;          // Space to operate on
    int molid=-1;         // id of molecule type to operate on
    double dptrans=0;     // translational displacement parameter
    double dprot=0;       // rotational displacement parameter
    Point dir={1,1,1};    // directions in which translation is allowed
    double _sqd=0;        // squared displacement of last attempt (was uninitialized)
    Average<double> msqd; // mean squared displacement

    //! Report move parameters and statistics as json
    void _to_json(json &j) const override {
        j = {
            {"dir", dir}, {"dp", dptrans}, {"dprot", dprot},
            {"molid", molid},
            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
            {"molecule", molecules<Tpvec>[molid].name}
        };
        _roundjson(j,3);
    }

    //! Configure via json object; throws with the move name prefixed on error
    void _from_json(const json &j) override {
        assert(!molecules<Tpvec>.empty());
        try {
            std::string molname = j.at("molecule");
            auto it = findName(molecules<Tpvec>, molname);
            if (it == molecules<Tpvec>.end())
                throw std::runtime_error("unknown molecule '" + molname + "'");
            molid = it->id();
            dir = j.value("dir", Point(1,1,1));
            dprot = j.at("dprot");
            dptrans = j.at("dp");
            if (repeat<0) { // default: repeat once per molecule of this type
                auto v = spc.findMolecules(molid);
                repeat = std::distance(v.begin(), v.end());
            }
        }
        catch (std::exception &e) {
            throw std::runtime_error(name+": " + e.what());
        }
    } //!< Configure via json object

    //! Translate and/or rotate one randomly picked active molecular group
    void _move(Change &change) override {
        assert(molid>=0);
        assert(!spc.groups.empty());
        assert(spc.geo.getVolume()>0);
        _sqd = 0; // reset: a rotation-only move leaves the mass center in place
        // pick random group from the system matching molecule type
        // TODO: This can be slow -- implement look-up-table in Space
        auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 'molid'
        if (size(mollist)>0) {
            auto it = slump.sample( mollist.begin(), mollist.end() );
            if (!it->empty()) {
                assert(it->id==molid);
                if (dptrans>0) { // translate
                    Point oldcm = it->cm;
                    Point dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans;
                    it->translate( dp, spc.geo.boundaryFunc );
                    _sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement
                }
                if (dprot>0) { // rotate about a random axis
                    Point u = ranunit(slump);
                    double angle = dprot * (slump()-0.5);
                    Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
                    it->rotate(Q, spc.geo.boundaryFunc);
                }
                if (dptrans>0||dprot>0) { // define changes
                    Change::data d;
                    d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group
                    d.all = true; // *all* atoms in group were moved
                    change.groups.push_back( d ); // add to list of moved groups
                }
                // sanity check: stored mass center must match a fresh recomputation
                assert( spc.geo.sqdist( it->cm,
                            Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 );
            }
        }
    }
    void _accept(Change &change) override { msqd += _sqd; } // sample squared displacement
    void _reject(Change &change) override { msqd += 0; }    // rejected attempts sample zero
public:
    TranslateRotate(Tspace &spc) : spc(spc) {
        name = "moltransrot";
        repeat = -1; // meaning repeat N times
    }
};
/**
* @brief Move that will swap conformation of a molecule
*
* This will swap between different molecular conformations
* as defined in `MoleculeData` with `traj` and `weight`.
* If defined, the weight
* distribution is respected, otherwise all conformations
* have equal intrinsic weight. Upon insertion, the new conformation
* is randomly oriented and placed on top of the mass-center of
* an existing molecule. That is, there is no mass center movement.
*
* @todo Add feature to align molecule on top of an existing one
* @todo Expand `_info()` to show number of conformations
* @warning Weighted distributions untested and not verified for correctness
* @date Malmo, November 2016
*/
template<class Tspace>
class ConformationSwap : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef MoleculeData<Tpvec> Tmoldata;
RandomInserter<Tmoldata> inserter; // produces a randomly oriented conformation
Tspace& spc; // Space to operate on
int molid=-1; // id of molecule type to operate on
int newconfid=-1; // conformation id picked by the latest trial move
// Report molecule id and name as json
void _to_json(json &j) const override {
j = {
{"molid", molid},
{"molecule", molecules<Tpvec>[molid].name}
};
_roundjson(j,3);
}
// Configure via json; requires the molecule to define >= 2 conformations
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
std::string molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
if ( molecules<Tpvec>[molid].numConformations()<2)
throw std::runtime_error("minimum two conformations required");
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end());
}
}
catch (std::exception &e) {
throw std::runtime_error(name+": " + e.what());
}
} //!< Configure via json object
// Replace a random active group's coordinates with a freshly generated
// conformation placed on the group's mass center (no net CM movement)
void _move(Change &change) override {
assert(molid>=0);
assert(change.empty());
auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 'molid'
if ( size(mollist)>0 ) {
auto g = slump.sample( mollist.begin(), mollist.end() );
if (not g->empty()) {
inserter.offset = g->cm; // place new conformation on current mass center
Tpvec p = inserter(spc.geo, spc.p, molecules<Tpvec>[molid]); // get conformation
newconfid = molecules<Tpvec>[molid].getConfIndex();
if (p.size() != g->size())
throw std::runtime_error(name + ": conformation atom count mismatch");
std::copy( p.begin(), p.end(), g->begin() ); // override w. new conformation
#ifndef NDEBUG
// this move shouldn't move mass centers, so let's check if this is true:
Point newcm = Geometry::massCenter(p.begin(), p.end(), spc.geo.boundaryFunc);
if ( (newcm - g->cm).norm()>1e-6 )
throw std::runtime_error(name + ": unexpected mass center movement");
#endif
Change::data d;
d.index = Faunus::distance(spc.groups.begin(), g); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
d.internal = false; // we *don't* want to calculate the internal energy
change.groups.push_back( d ); // add to list of moved groups
}
}
}
// On acceptance, commit the new conformation id to the moved group.
// NOTE(review): there is no _reject override restoring confid; presumably
// the trial/old state synchronization handles this -- verify against Movebase.
void _accept(Change &change) override {
assert(change.groups.size()==1);
spc.groups[ change.groups.front().index ].confid = newconfid;
}
public:
ConformationSwap(Tspace &spc) : spc(spc) {
name = "conformationswap";
repeat = -1; // meaning repeat n times
}
}; // end of conformation swap move
/**
 * @brief Sketch for MD move (not yet functional)
 */
template<typename Tspace>
class ForceMove : public Movebase {
private:
    typedef typename Tspace::Tpvec Tpvec;
    void _to_json(json &j) const override {}  // nothing to report yet
    void _from_json(const json &j) override {} // nothing to configure yet
    std::vector<Point> forces, velocities; // per-particle forces and velocities
public:
    ForceMove() {
        // TODO: resize forces and velocities to match spc.p
    }
}; // end of forcemove
#ifdef DOCTEST_LIBRARY_INCLUDED
// Unit test: configure TranslateRotate from json and verify the
// round-trip serialization reports the same parameters back
TEST_CASE("[Faunus] TranslateRotate")
{
typedef Particle<Radius, Charge, Dipole, Cigar> Tparticle;
typedef Space<Geometry::Cuboid, Tparticle> Tspace;
typedef typename Tspace::Tpvec Tpvec;
CHECK( !atoms<Tparticle>.empty() ); // set in a previous test
CHECK( !molecules<Tpvec>.empty() ); // set in a previous test
Tspace spc;
TranslateRotate<Tspace> mv(spc);
json j = R"( {"molecule":"B", "dp":1.0, "dprot":0.5, "dir":[0,1,0], "repeat":2 })"_json;
mv.from_json(j);
j = json(mv).at(mv.name); // serialize back and inspect
CHECK( j.at("molecule") == "B");
CHECK( j.at("dir") == Point(0,1,0) );
CHECK( j.at("dp") == 1.0 );
CHECK( j.at("repeat") == 2 );
CHECK( j.at("dprot") == 0.5 );
}
#endif
//! Monte Carlo volume move: random walk in ln(V) with selectable scaling method
template<typename Tspace>
class VolumeMove : public Movebase {
private:
    // supported scaling methods, json name -> enum
    const std::map<std::string, Geometry::VolumeMethod> methods = {
        {"xy", Geometry::XY},
        {"isotropic", Geometry::ISOTROPIC},
        {"isochoric", Geometry::ISOCHORIC}
    };
    typename decltype(methods)::const_iterator method; // selected scaling method
    typedef typename Tspace::Tpvec Tpvec;
    Tspace& spc;
    Average<double> msqd; // mean squared volume displacement
    double dV=0, deltaV=0, Vnew=0, Vold=0;

    //! Report parameters and volume-displacement statistics as json
    void _to_json(json &j) const override {
        using namespace u8;
        j = {
            {"dV", dV}, {"method", method->first},
            {rootof + bracket(Delta + "V" + squared), std::sqrt(msqd.avg())},
            {cuberoot + rootof + bracket(Delta + "V" + squared),
                std::cbrt(std::sqrt(msqd.avg()))}
        };
        _roundjson(j,3);
    }

    //! Configure via json; throws on unknown scaling method
    void _from_json(const json &j) override {
        method = methods.find( j.value("method", "isotropic") );
        if (method==methods.end())
            // bug fix: the exception was constructed but never thrown, so an
            // unknown method left `method` == end() and was silently accepted
            throw std::runtime_error("unknown volume change method");
        dV = j.at("dV");
    }

    //! Scale the simulation volume by a random step in ln(V)
    void _move(Change &change) override {
        if (dV>0) {
            change.dV=true;
            change.all=true;
            Vold = spc.geo.getVolume();
            if (method->second == Geometry::ISOCHORIC)
                Vold = std::pow(Vold,1.0/3.0); // volume is constant
            Vnew = std::exp(std::log(Vold) + (slump()-0.5) * dV);
            deltaV = Vnew-Vold;
            spc.scaleVolume(Vnew, method->second);
        } else deltaV=0;
    }
    void _accept(Change &change) override { msqd += deltaV*deltaV; } // sample (dV)^2
    void _reject(Change &change) override { msqd += 0; }             // rejected samples zero
public:
    VolumeMove(Tspace &spc) : spc(spc) {
        name = "volume";
        repeat = 1;
    }
}; // end of VolumeMove
/*
* @brief Establishes equilibrium of matter between all species
*
* Consider the dissociation process AX=A+X. This class will locate
* all species of type AX and A and make an MC swap move between them.
* X is implicit, meaning that it enters only with its chemical potential
* (activity). The titrating species, their dissociation constants
* and the chemical potential of the titrant are read from a
* `processes` JSON object.
* For example, for proton titration of phosphate one would
* use the following JSON input (pH 7.0):
*
* @todo
* Implement classification of reactions to group weight in
* mc sweep {reference : prob(reference)}
*
*/
template<typename Tspace>
class SpeciationMove : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc; // trial space on which particles are inserted/deleted
Tspace *otherspc; // the "other" state's space, kept in sync (see setOther)
ReactionData<Tpvec> *trialprocess; // reaction attempted by the current move
std::map<std::string, Average<double>> accmap; // acceptance statistics per reaction
double log_k; // equilibrium constant (log10) of the attempted reaction
bool forward; // direction of the attempted reaction
std::vector<int> molDel; // index of groups to delete
std::vector<int> atomDel; // atom index to delete
std::map<int, int> molcnt_ins, atomcnt_ins,
molcnt_del, atomcnt_del,
molcnt, atomcnt; // id's and number of inserted/deleted mols and atoms
std::multimap<int, Tpvec> pmap; // coordinates of mols and atoms to be inserted
unsigned int Ndeleted, Ninserted; // Number of accepted deletions and insertions
// Report attempt/acceptance statistics per reaction as json
void _to_json(json &j) const override {
j = {
// { "replicas", mpi.nproc() },
// { "datasize", pt.getFormat() }
};
json &_j = j["reactions"];
_j = json::object();
for (auto &m : accmap)
_j[m.first] = {
{"attempts", m.second.cnt},
{"acceptance", m.second.avg()}
};
}
// No json configuration is currently read
void _from_json(const json &j) override {
//j["speciation"] = "speciation";
}
public:
SpeciationMove(Tspace &spc) : spc(spc) {
name = "speciation";
repeat = 1;
}
// Store a pointer to the other state's space so atom shuffling below can
// keep both states' particle orderings in sync
void setOther(Tspace &ospc) {
otherspc = &ospc;
}
double energy(); //!< Returns intrinsic energy of the process
// Attempt a randomly picked reaction in a random direction:
// (1) verify reactants can be removed and products inserted,
// (2) deactivate removed species, (3) activate inserted species.
void _move(Change &change) override {
if ( reactions<Tpvec>.size()>0 ) {
auto rit = slump.sample( reactions<Tpvec>.begin(), reactions<Tpvec>.end() );
log_k = rit->log_k;
forward = (bool)slump.range(0,1); // random boolean
trialprocess = &(*rit);
if ( rit->empty(forward) ) // Enforce canonic constraint if invoked
return; //Out of material, slip out the back door
for (auto &m : rit->Molecules2Add( !forward )) { // Delete checks
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
auto git = mollist.begin();
if ( git->size() < m.second ) // assure that there are atoms enough in the group
return;
} else {
mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
if ( size(mollist) < m.second )
return; // Not possible to perform change, escape through the back door
}
}
for (auto &m : rit->Molecules2Add( forward )) { // Addition checks
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
auto git = mollist.begin();
if ( (git->size() + m.second) > git->capacity() ) // assure that there are atoms enough in the group
return; // if not slip out the back door
} else {
mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
if ( size(mollist) < m.second )
return; // Not possible to perform change, escape through the back door
}
}
//The move is doable, raise flag
change.dNpart=true;
for (auto &m : rit->Molecules2Add( !forward )) { // Delete
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
Change::data d;
auto git = mollist.begin();
auto othermollist = otherspc->findMolecules(m.first, Tspace::ALL); // implies that new and old are in sync
auto othergit=othermollist.begin();
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.internal = true;
d.dNpart = true;
for ( int N=0; N<m.second; N++ ) { // deactivate m.second m.first atoms
auto ait = slump.sample( git->begin(), git->end()); // iterator to random atom
// Shuffle back to end, both in trial and new
auto nait = git->end()-1; //iterator to last atom
int dist = Faunus::distance( ait, git->end() ); // distance to random atom from end
if ( Faunus::distance( ait, nait) > 1 ) {
std::iter_swap(ait, nait);
std::iter_swap(othergit->end()-dist-N, othergit->end() - (1+N) );
}
d.atoms.push_back ( Faunus::distance(git->begin(), nait) );
git->deactivate( nait, git->end());
}
std::sort( d.atoms.begin(), d.atoms.end() );
change.groups.push_back( d ); // add to list of moved groups
} else {
mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
for ( int N=0; N <m.second; N++ ) {
Change::data d;
auto git = slump.sample(mollist.begin(), mollist.end());
git->deactivate( git->begin(), git->end());
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
change.groups.push_back( d ); // add to list of moved groups
mollist = spc.findMolecules( m.first , Tspace::ACTIVE); // refresh list after deactivation
// Activate/deactivate all? simply move end to front?
}
}
}
for (auto &m : rit->Molecules2Add( forward )) { // Add
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
Change::data d;
auto git = mollist.begin();
d.index = Faunus::distance( spc.groups.begin(), git);
d.internal = true;
d.dNpart = true;
for ( int N=0; N<m.second; N++ ) { // activate m.second m.first atoms
git->activate( git->end(), git->end() + 1);
auto ait = git->end()-1;
spc.geo.randompos(ait->pos, slump); // place new atom at random position
spc.geo.boundaryFunc(ait->pos);
d.atoms.push_back( Faunus::distance(git->begin(), ait) ); // index of particle rel. to group
}
std::sort( d.atoms.begin(), d.atoms.end());
change.groups.push_back( d ); // add to list of moved groups
} else {
mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
if ( size(mollist) < m.second ) {
change.dNpart=false;
return; // Not possible to perform change, escape through the back door
}
for ( int N=0; N <m.second; N++ ) {
Change::data d;
auto git = slump.sample(mollist.begin(), mollist.end());
git->activate( git->inactive().begin(), git->inactive().end());
Point oldcm = git->cm;
// NOTE(review): the lines below mix the global `random` engine with
// `slump` used elsewhere in this class -- verify this is intentional
// (it affects reproducibility when restoring RNG state)
spc.geo.randompos(oldcm, random);
git->translate( oldcm, spc.geo.boundaryFunc );
oldcm = ranunit(slump);
Eigen::Quaterniond Q( Eigen::AngleAxisd(2*pc::pi*random(), oldcm) );
git->rotate(Q, spc.geo.boundaryFunc);
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
change.groups.push_back( d ); // add to list of moved groups
mollist = spc.findMolecules( m.first , Tspace::INACTIVE); // refresh list after activation
}
}
}
std::sort(change.groups.begin(), change.groups.end() );
} else
throw std::runtime_error("No reactions in list, disable speciation or add reactions");
}
// Ideal (chemical potential) contribution: -/+ ln(10)*log_k depending on direction
double bias(Change &change, double uold, double unew) override {
if (forward)
return -log_k*std::log(10);
return log_k*std::log(10);
} //!< adds extra energy change not captured by the Hamiltonian
// Record acceptance and update the reservoir for canonic reactions
void _accept(Change &change) override {
accmap[ trialprocess->name ] += 1;
trialprocess->N_reservoir += (forward == true) ? -1 : 1;
if( trialprocess->N_reservoir < 0 && trialprocess->canonic == true )
throw std::runtime_error("There are no negative number of molecules");
}
void _reject(Change &change) override {
accmap[ trialprocess->name ] += 0;
}
}; // End of class SpeciationMove
template<typename Tspace>
class Cluster : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc;
Average<double> msqd, msqd_angle, N; // statistics: displacement, angle, cluster size
double thresholdsq=0, dptrans=0, dprot=0, angle=0, _bias=0;
Point dir={1,1,1}, dp;
std::vector<std::string> names;
std::vector<int> ids;
std::vector<size_t> index; // all possible molecules to move
// Report parameters and statistics as json
void _to_json(json &j) const override {
using namespace u8;
j = {
{"threshold", std::sqrt(thresholdsq)}, {"dir", dir}, {"dp", dptrans}, {"dprot", dprot},
{rootof + bracket("r" + squared), std::sqrt(msqd.avg())},
{rootof + bracket(theta + squared) + "/" + degrees, std::sqrt(msqd_angle.avg()) / 1.0_deg},
{bracket("N"), N.avg()}
};
_roundjson(j,3);
}
// Configure via json and build the list of eligible (non-atomic) group indices
void _from_json(const json &j) override {
dptrans = j.at("dp");
dir = j.value("dir", Point(1,1,1));
dprot = j.at("dprot");
thresholdsq = std::pow(j.at("threshold").get<double>(), 2);
names = j.at("molecules").get<decltype(names)>(); // molecule names
ids = names2ids(molecules<Tpvec>, names); // names --> molids
index.clear();
for (auto &g : spc.groups)
if (!g.atomic)
if (std::find(ids.begin(), ids.end(), g.id)!=ids.end() )
index.push_back( &g-&spc.groups.front() ); // group index via pointer arithmetic
if (repeat<0)
repeat = index.size();
}
// Grow `cluster` from seed group `first`: iteratively add every eligible
// group whose mass center lies within the distance threshold of a member
void findCluster(Tspace &spc, size_t first, std::set<size_t>& cluster) {
std::set<size_t> pool(index.begin(), index.end());
cluster.clear();
cluster.insert(first);
pool.erase(first);
size_t n;
do { // find cluster (not very clever...)
n = cluster.size();
for (size_t i : cluster)
if (!spc.groups[i].empty()) // skip inactive (empty) groups
for (size_t j : pool)
if (!spc.groups[j].empty()) // skip inactive (empty) groups
if (i!=j)
if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)<=thresholdsq) {
cluster.insert(j);
pool.erase(j);
}
} while (cluster.size()!=n); // until no more groups were added
// check if cluster is too large
double max = spc.geo.getLength().minCoeff()/2;
for (auto i : cluster)
for (auto j : cluster)
if (j>i)
if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)>=max*max)
throw std::runtime_error(name+": cluster larger than half box length");
}
// Rigidly translate and rotate a whole cluster of molecules
void _move(Change &change) override {
if (thresholdsq>0 && !index.empty()) {
std::set<size_t> cluster; // all group index in cluster
size_t first = *slump.sample(index.begin(), index.end()); // random molecule (nuclei)
findCluster(spc, first, cluster); // find cluster around first
N += cluster.size(); // average cluster size
Change::data d;
d.all=true;
dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans;
angle = dprot * (slump()-0.5);
Point COM = Geometry::trigoCom(spc, cluster); // cluster center
Eigen::Quaterniond Q;
Q = Eigen::AngleAxisd(angle, ranunit(slump)); // quaternion
for (auto i : cluster) { // loop over molecules in cluster
auto &g = spc.groups[i];
Geometry::rotate(g.begin(), g.end(), Q, spc.geo.boundaryFunc, -COM);
// rotate the group mass center about the cluster center as well
g.cm = g.cm-COM;
spc.geo.boundary(g.cm);
g.cm = Q*g.cm+COM;
spc.geo.boundary(g.cm);
g.translate( dp, spc.geo.boundaryFunc );
d.index=i;
change.groups.push_back(d);
}
_bias += 0; // one may add bias here...
#ifndef NDEBUG
// sanity: the cluster COM should have moved by exactly |dp|
Point newCOM = Geometry::trigoCom(spc, cluster);
double _zero = std::sqrt( spc.geo.sqdist(COM,newCOM) ) - dp.norm();
if (fabs(_zero)>1)
std::cerr << _zero << " ";
#endif
}
}
// Extra (non-Hamiltonian) energy change; currently always zero
double bias(Change &change, double uold, double unew) override {
return _bias;
} //!< adds extra energy change not captured by the Hamiltonian
void _reject(Change &change) override { msqd += 0; msqd_angle += 0; }
void _accept(Change &change) override {
msqd += dp.squaredNorm();
msqd_angle += angle*angle;
}
public:
Cluster(Tspace &spc) : spc(spc) {
cite = "doi:10/cj9gnn";
name = "cluster";
repeat = -1; // meaning repeat N times
}
};
template<typename Tspace>
class Pivot : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
std::vector<std::shared_ptr<Potential::BondData>> bonds; // harmonic bonds usable as pivot axes
std::vector<int> index; // atom index to rotate
Tspace& spc;
std::string molname; // name of molecule to operate on
int molid; // id of molecule type to operate on
double dprot; // rotational displacement parameter
double d2; // cm movement, squared
Average<double> msqd; // cm mean squared displacement
// Report parameters and statistics as json
void _to_json(json &j) const override {
using namespace u8;
j = {
{"molecule", molname}, {"dprot", dprot},
{u8::rootof + u8::bracket("r_cm" + u8::squared), std::sqrt(msqd.avg())}
};
_roundjson(j,3);
}
// Configure via json; collects the molecule's harmonic bonds as pivot candidates
void _from_json(const json &j) override {
dprot = j.at("dprot");
molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
bonds = Potential::filterBonds(
molecules<Tpvec>[molid].bonds, Potential::BondData::HARMONIC);
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
if (repeat>0)
repeat *= bonds.size(); // ...and for each bond
}
}
// Rotate the chain segment on one side of a randomly picked harmonic bond
// about the bond axis by a random angle
void _move(Change &change) override {
d2=0;
if (std::fabs(dprot)>1e-9) {
auto g = spc.randomMolecule(molid, slump); // look for random group
if (g!=spc.groups.end())
if (g->size()>2) { // must at least have three atoms
auto b = slump.sample(bonds.begin(), bonds.end()); // random harmonic bond
if (b != bonds.end()) {
int i1 = (*b)->index.at(0);
int i2 = (*b)->index.at(1);
int offset = std::distance( spc.p.begin(), g->begin() );
index.clear();
// randomly rotate either the tail (after i2) or the head (before i1)
if (slump()>0.5)
for (size_t i=i2+1; i<g->size(); i++)
index.push_back(i+offset);
else
for (int i=0; i<i1; i++)
index.push_back(i+offset);
i1+=offset;
i2+=offset;
if (!index.empty()) {
Point oldcm = g->cm;
g->unwrap(spc.geo.distanceFunc); // remove pbc
Point u = (spc.p[i1].pos - spc.p[i2].pos).normalized(); // bond axis
double angle = dprot * (slump()-0.5);
Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
auto M = Q.toRotationMatrix();
for (auto i : index) {
spc.p[i].rotate(Q, M); // internal rot.
spc.p[i].pos = Q * ( spc.p[i].pos - spc.p[i1].pos)
+ spc.p[i1].pos; // positional rot.
}
g->cm = Geometry::massCenter(g->begin(), g->end());
g->wrap(spc.geo.boundaryFunc); // re-apply pbc
d2 = spc.geo.sqdist(g->cm, oldcm); // CM movement
Change::data d;
d.index = Faunus::distance( spc.groups.begin(), g ); // integer *index* of moved group
d.all = d.internal = true; // trigger internal interactions
change.groups.push_back( d ); // add to list of moved groups
}
}
}
}
}
void _accept(Change &change) override { msqd += d2; } // sample CM displacement
void _reject(Change &change) override { msqd += 0; }  // rejected attempts sample zero
public:
Pivot(Tspace &spc) : spc(spc) {
name = "pivot";
repeat = -1; // --> repeat=N
}
}; //!< Pivot move around random harmonic bond axis
#ifdef ENABLE_MPI
/**
* @brief Class for parallel tempering (aka replica exchange) using MPI
*
* Although not completely correct, the recommended way of performing a temper move
* is to do `N` Monte Carlo passes with regular moves and then do a tempering move.
* This is because the MPI nodes must be in sync and if you have a system where
* the random number generator calls are influenced by the Hamiltonian we could
* end up in a deadlock.
*
* @date Lund 2012, 2018
*/
template<class Tspace>
class ParallelTempering : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
Tspace& spc; // Space to operate on
MPI::MPIController& mpi;
int partner; //!< Exchange replica (partner)
enum extradata {VOLUME=0}; //!< Structure of extra data to send
std::map<std::string, Average<double>> accmap; // acceptance statistics per partner pair
MPI::FloatTransmitter ft; //!< Class for transmitting floats over MPI
MPI::ParticleTransmitter<Tpvec> pt;//!< Class for transmitting particles over MPI
// Randomly pick the left or right neighbouring rank to exchange with;
// even and odd ranks move in opposite directions so partners pair up
void findPartner() {
int dr=0;
partner = mpi.rank();
(mpi.random()>0.5) ? dr++ : dr--;
(mpi.rank() % 2 == 0) ? partner+=dr : partner-=dr;
} //!< Find replica to exchange with
bool goodPartner() {
assert(partner!=mpi.rank() && "Selfpartner! This is not supposed to happen.");
if (partner>=0)
if ( partner<mpi.nproc() )
if ( partner!=mpi.rank() )
return true;
return false;
} //!< Is partner valid?
// Report replica count, transmission format, and per-pair exchange statistics
void _to_json(json &j) const override {
j = {
{ "replicas", mpi.nproc() },
{ "datasize", pt.getFormat() }
};
json &_j = j["exchange"];
_j = json::object();
for (auto &m : accmap)
_j[m.first] = {
{"attempts", m.second.cnt},
{"acceptance", m.second.avg()}
};
}
// Swap the full particle configuration (and volume) with the partner replica
void _move(Change &change) override {
double Vold = spc.geo.getVolume();
findPartner();
Tpvec p; // temporary storage
p.resize(spc.p.size());
if (goodPartner()) {
change.all=true;
pt.sendExtra[VOLUME]=Vold; // copy current volume for sending
pt.recv(mpi, partner, p); // receive particles
pt.send(mpi, spc.p, partner); // send everything
pt.waitrecv();
pt.waitsend();
double Vnew = pt.recvExtra[VOLUME];
if (Vnew<1e-9 || spc.p.size() != p.size())
MPI_Abort(mpi.comm, 1); // transmission error -> hard abort
if (std::fabs(Vnew-Vold)>1e-9)
change.dV=true;
spc.p = p;
spc.geo.setVolume(Vnew);
// update mass centers
for (auto& g : spc.groups)
if (g.atomic==false)
g.cm = Geometry::massCenter(g.begin(), g.end(),
spc.geo.boundaryFunc, -g.begin()->pos);
}
}
double exchangeEnergy(double mydu) {
std::vector<MPI::FloatTransmitter::floatp> duSelf(1), duPartner;
duSelf[0]=mydu;
duPartner = ft.swapf(mpi, duSelf, partner);
return duPartner.at(0); // return partner energy change
} //!< Exchange energy with partner
// Bias is the partner's energy change (standard replica-exchange criterion)
double bias(Change &change, double uold, double unew) override {
return exchangeEnergy(unew-uold); // Exchange dU with partner (MPI)
}
std::string id() {
std::ostringstream o;
if (mpi.rank() < partner)
o << mpi.rank() << " <-> " << partner;
else
o << partner << " <-> " << mpi.rank();
return o.str();
} //!< Unique string to identify set of partners
void _accept(Change &change) override {
if ( goodPartner() )
accmap[ id() ] += 1;
}
void _reject(Change &change) override {
if ( goodPartner() )
accmap[ id() ] += 0;
}
void _from_json(const json &j) override {
pt.setFormat( j.value("format", std::string("XYZQI") ) );
}
public:
ParallelTempering(Tspace &spc, MPI::MPIController &mpi ) : spc(spc), mpi(mpi) {
name="temper";
partner=-1;
pt.recvExtra.resize(1); // one extra float: the volume
pt.sendExtra.resize(1);
}
};
#endif
// Container that owns all configured MC moves and picks among them with
// probability proportional to each move's `repeat` weight
template<typename Tspace>
class Propagator : public BasePointerVector<Movebase> {
private:
int _repeat; // total number of attempts per sweep (sum of weights)
std::discrete_distribution<> dist; // weighted distribution over moves
std::vector<double> w; // list of weights for each move
// Register a weight for the most recently added move and rebuild the distribution
void addWeight(double weight=1) {
w.push_back(weight);
dist = std::discrete_distribution<>(w.begin(), w.end());
_repeat = int(std::accumulate(w.begin(), w.end(), 0.0));
}
public:
using BasePointerVector<Movebase>::vec;
inline Propagator() {}
// Build the move list from the json "moves" array; unknown keys are
// warned about and skipped, configuration errors are rethrown with context
inline Propagator(const json &j, Tspace &spc, MPI::MPIController &mpi) {
if (j.count("random")==1)
Movebase::slump = j["random"]; // slump is static --> shared for all moves
for (auto &m : j.at("moves")) {// loop over move list
size_t oldsize = vec.size();
for (auto it : m.items()) {
try {
#ifdef ENABLE_MPI
if (it.key()=="temper") this->template push_back<Move::ParallelTempering<Tspace>>(spc, mpi);
#endif
if (it.key()=="moltransrot") this->template push_back<Move::TranslateRotate<Tspace>>(spc);
if (it.key()=="conformationswap") this->template push_back<Move::ConformationSwap<Tspace>>(spc);
if (it.key()=="transrot") this->template push_back<Move::AtomicTranslateRotate<Tspace>>(spc);
if (it.key()=="pivot") this->template push_back<Move::Pivot<Tspace>>(spc);
if (it.key()=="volume") this->template push_back<Move::VolumeMove<Tspace>>(spc);
if (it.key()=="speciation") this->template push_back<Move::SpeciationMove<Tspace>>(spc);
if (it.key()=="cluster") this->template push_back<Move::Cluster<Tspace>>(spc);
// exactly one move was appended -> configure it and record its weight
if (vec.size()==oldsize+1) {
vec.back()->from_json( it.value() );
addWeight(vec.back()->repeat);
} else
std::cerr << "warning: ignoring unknown move '" << it.key() << "'" << endl;
} catch (std::exception &e) {
throw std::runtime_error("Error adding move '" + it.key() + "': " + e.what());
}
}
}
}
int repeat() { return _repeat; }
// Return an iterator to a randomly selected move (weighted); end() if empty
auto sample() {
if (!vec.empty()) {
assert(w.size() == vec.size());
return vec.begin() + dist( Move::Movebase::slump.engine );
}
return vec.end();
} //!< Pick move from a weighted, random distribution
};
}//Move namespace
template<class Tgeometry, class Tparticle>
class MCSimulation {
private:
typedef Space<Tgeometry, Tparticle> Tspace;
typedef typename Tspace::Tpvec Tpvec;
//! Metropolis acceptance criterion: always accept downhill moves,
//! accept uphill moves with Boltzmann probability exp(-du).
//! Throws if the energy difference is NaN.
bool metropolis(double du) const {
    if (std::isnan(du))
        throw std::runtime_error("Metropolis error: energy cannot be NaN");
    if (du < 0)
        return true; // downhill: unconditional accept
    return Move::Movebase::slump() <= std::exp(-du); // uphill: probabilistic accept
} //!< Metropolis criterion (true=accept)
// Bundle of a configuration (space) and its Hamiltonian; one instance each
// for the old (current) and new (trial) states of the Markov chain
struct State {
Tspace spc;
Energy::Hamiltonian<Tspace> pot;
State(const json &j) : spc(j), pot(spc,j) {}
// Copy the parts of `other` touched by `change` into this state
void sync(State &other, Change &change) {
spc.sync( other.spc, change );
pot.sync( &other.pot, change );
}
}; //!< Contains everything to describe a state
State state1, // old state
state2; // new state (trial);
double uinit=0, dusum=0; // initial energy; running sum of accepted energy changes
Average<double> uavg; // energy average (not updated in the code shown here)
// (Re)initialize both states: tag old/new Hamiltonians, compute the
// initial total energy, and synchronize the trial state from the current one
void init() {
dusum=0;
Change c; c.all=true;
state1.pot.key = Energy::Energybase::OLD; // this is the old energy (current)
state2.pot.key = Energy::Energybase::NEW; // this is the new energy (trial)
state1.pot.init();
uinit = state1.pot.energy(c); // total energy of the initial configuration
state2.sync(state1, c);
state2.pot.init();
// Hack in reference to state1 in speciation
for (auto base : moves.vec) {
auto derived = std::dynamic_pointer_cast<Move::SpeciationMove<Tspace>>(base);
if (derived)
derived->setOther(state1.spc);
}
#ifndef NDEBUG
// consistency check: both states must report (nearly) the same energy.
// NOTE(review): for negative uinit, error/uinit is negative and the first
// assert passes trivially -- consider dividing by std::fabs(uinit); verify
double u2 = state2.pot.energy(c);
double error = std::fabs(uinit-u2);
if (uinit!=0)
assert(error/uinit<1e-3);
else
assert(error<1e-6);
//cout << "u1 = " << uinit << " u2 = " << u2 << endl;
//assert( std::fabs((uinit-u2)/uinit)<1e-3 );
#endif
}
public:
Move::Propagator<Tspace> moves; // weighted container of all MC moves
// Accessors expose the *current* (old) state only
auto& pot() { return state1.pot; }
auto& space() { return state1.spc; }
const auto& pot() const { return state1.pot; }
const auto& space() const { return state1.spc; }
const auto& geometry() const { return state1.spc.geo; }
const auto& particles() const { return state1.spc.p; }
double drift() {
Change c; c.all=true;
double ufinal = state1.pot.energy(c);
return ( ufinal-(uinit+dusum) ) / uinit;
} //!< Calculates the relative energy drift from initial configuration
MCSimulation(const json &j, MPI::MPIController &mpi) : state1(j), state2(j), moves(j, state2.spc, mpi) {
init();
}
void store(json &j) const {
j = state1.spc;
j["random-move"] = Move::Movebase::slump;
j["random-global"] = Faunus::random;
} // store system to json object
void restore(const json &j) {
state1.spc = j;
state2.spc = j;
Move::Movebase::slump = j["random-move"]; // restore move random number generator
Faunus::random = j["random-global"]; // restore global random number generator
//reactions<Tpvec> = j.at("reactionlist").get<decltype(reactions<Tpvec>)>(); // should be handled by space
init();
} //!< restore system from previously store json object
void move() {
Change change;
for (int i=0; i<moves.repeat(); i++) {
auto mv = moves.sample(); // pick random move
if (mv != moves.end() ) {
change.clear();
(**mv).move(change);
if (!change.empty()) {
double unew, uold, du;
#pragma omp parallel sections
{
#pragma omp section
{ unew = state2.pot.energy(change); }
#pragma omp section
{ uold = state1.pot.energy(change); }
}
du = unew - uold;
// if any energy returns NaN (from i.e. division by zero), the
// configuration will always be rejected, or if moving from NaN
// to a finite energy, always accepted.
if (std::isnan(uold) and not std::isnan(unew))
du = -pc::infty; // accept
else if (std::isnan(unew))
du = pc::infty; // reject
// if the difference in energy is NaN (from i.e. infinity minus infinity), the
// configuration will always be accepted. This should be
// noted during equilibration.
else if (std::isnan(du))
du = 0; // accept
double bias = (**mv).bias(change, uold, unew) + Nchem( state2.spc, state1.spc , change);
if ( metropolis(du + bias) ) { // accept move
state1.sync( state2, change );
(**mv).accept(change);
}
else { // reject move
state2.sync( state1, change );
(**mv).reject(change);
du=0;
}
dusum+=du; // sum of all energy changes
}
}
}
}
void to_json(json &j) {
j = state1.spc.info();
j["temperature"] = pc::temperature / 1.0_K;
j["moves"] = moves;
j["energy"].push_back(state1.pot);
}
};
/** @brief Free-function serialization hook: forwards to the member function. */
template<class Tgeometry, class Tparticle>
void to_json(json &j, MCSimulation<Tgeometry,Tparticle> &mc)
{
    mc.to_json(j);
}
/**
 * @brief Ideal-gas / chemical-potential bias for moves that change particle numbers.
 *
 * For each group whose population changed between the old space (spc_o) and the
 * new space (spc_n), accumulates the log of the insertion/deletion prefactor
 *
 * @f[
 * \beta U = \ln ( \sum N_o!/N_n! \exp([N_n - N_o]\beta \mu) V^{N_n - N_o} )
 * @f]
 *
 * and returns its negative, to be added to the Metropolis exponent.
 *
 * @todo
 * - Rename to something more descriptive
 * - use exception message to suggest how to fix the problem
 */
template<typename Tspace>
double Nchem( Tspace &spc_n, Tspace &spc_o, const Change &change) {
using Tpvec = typename Tspace::Tpvec;
double NoverO=0;
if ( change.dNpart ) {// Have the number of any molecules changed
for ( auto &m : change.groups ) {
int N_o = 0; // population in the old (current) state
int N_n = 0; // population in the new (trial) state
if (!m.dNpart)
if (!molecules<Tpvec>[ spc_n.groups[m.index].id ].atomic) { // Molecular species
auto mollist_n = spc_n.findMolecules(m.index, Tspace::ACTIVE);
auto mollist_o = spc_o.findMolecules(m.index, Tspace::ACTIVE);
N_n=size(mollist_n);
N_o=size(mollist_o);
}
if ( m.dNpart ) { // atomic species: count atoms inside the single group
auto mollist_n = spc_n.findMolecules(spc_n.groups[m.index].id, Tspace::ALL);
auto mollist_o = spc_o.findMolecules(spc_o.groups[m.index].id, Tspace::ALL);
if ( size(mollist_n) > 1 || size(mollist_o) > 1 )
throw std::runtime_error("Bad definition: One group per atomic molecule!");
// Below is safe due to the catches above
// add consistency criteria with m.atoms.size() == N
N_n = mollist_n.begin()->size();
N_o = mollist_o.begin()->size();
}
int dN = N_n - N_o;
if (dN!=0) {
double V_n = spc_n.geo.getVolume();
// NOTE(review): V_o is computed but never used below -- both the
// insertion and deletion branches use V_n. Confirm whether the
// deletion term should use the old volume instead.
double V_o = spc_o.geo.getVolume();
double betamu = molecules<Tpvec>[ spc_n.groups[m.index].id ].activity;
// todo: add runtime error if activity <=0 ?
// Convert activity to a chemical potential only when it is meaningfully
// non-zero; otherwise betamu is used as-is (presumably ~0).
if (betamu > 1e-20)
betamu = std::log( betamu / 1.0_molar );
if (dN>0)
// insertion: sum ln-terms for each added particle
for (int n=0; n < dN; n++)
NoverO += -std::log( (N_o + 1 + n) / ( V_n * 1.0_molar )) + betamu;
else if (dN<0)
// deletion: sum ln-terms for each removed particle
for (int n=0; n < (-dN); n++)
NoverO += std::log( (N_o - n) / ( V_n * 1.0_molar )) - betamu;
}
}
}
return -NoverO; // negative sign since Pref exp{-beta(dU)} = exp{-beta(dU -ln(Pref)}
}
}//Faunus namespace
|
nbody.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
/*
* pRNG based on http://www.cs.wm.edu/~va/software/park/park.html
*/
#define MODULUS 2147483647
#define MULTIPLIER 48271
#define DEFAULT 123456789
static long seed = DEFAULT;
/*
 * Lehmer pseudo-random generator (Park & Miller "minimal standard",
 * multiplier 48271) using Schrage's factorization so the intermediate
 * product never overflows a long.
 *
 * Returns a value uniformly distributed in (0.0, 1.0); advances the
 * file-scope seed as a side effect.
 */
double Random(void)
{
    const long quotient = MODULUS / MULTIPLIER;
    const long remainder = MODULUS % MULTIPLIER;
    long next = MULTIPLIER * (seed % quotient) - remainder * (seed / quotient);
    /* Schrage: the result is in (-MODULUS, MODULUS); fold negatives back. */
    seed = (next > 0) ? next : next + MODULUS;
    return (double) seed / MODULUS;
}
/*
* End of the pRNG algorithm
*/
/* Particle state: position and mass. */
typedef struct {
double x, y, z;
double mass;
} Particle;
/* Per-particle integrator state: previous position and force accumulators. */
typedef struct {
double xold, yold, zold;
double fx, fy, fz;
} ParticleV;
void InitParticles( Particle[], ParticleV [], int );
double ComputeForces( Particle [], Particle [], ParticleV [], int );
double ComputeNewPos( Particle [], ParticleV [], int, double);
/*
 * Reads particle count and step count from stdin, runs the n-body
 * simulation, and prints the final particle positions to stdout.
 * Returns 0 on success, 1 on malformed input or allocation failure.
 */
int main()
{
    Particle *particles;   /* particle positions and masses */
    ParticleV *pv;         /* per-particle integrator state */
    int npart, i;
    int cnt;               /* number of simulation steps */
    double sim_t;          /* accumulated simulation time */

    /* The original code ignored the fscanf results; malformed input would
     * leave npart/cnt uninitialized. Validate both reads. */
    if (fscanf(stdin, "%d\n", &npart) != 1 || fscanf(stdin, "%d\n", &cnt) != 1 || npart < 0) {
        fprintf(stderr, "error: expected two non-negative integers on stdin\n");
        return 1;
    }
    /* Allocate memory for particles (sizeof *ptr idiom; check for OOM). */
    particles = malloc(sizeof *particles * npart);
    pv = malloc(sizeof *pv * npart);
    if (particles == NULL || pv == NULL) {
        fprintf(stderr, "error: out of memory\n");
        free(particles);
        free(pv);
        return 1;
    }
    /* Generate the initial values */
    InitParticles(particles, pv, npart);
    sim_t = 0.0;
    while (cnt--) {
        sim_t += ComputeNewPos(particles, pv, npart, ComputeForces(particles, particles, pv, npart));
    }
    for (i = 0; i < npart; i++)
        fprintf(stdout, "%.5lf %.5lf %.5lf\n", particles[i].x, particles[i].y, particles[i].z);
    free(particles);
    free(pv);
    return 0;
}
/*
 * Initializes npart particles with random positions, unit mass, and
 * zeroed force accumulators.
 *
 * This loop MUST stay serial: Random() advances a shared static seed, so
 * the original "#pragma omp parallel for" was a data race (undefined
 * behavior) and made the initial configuration non-reproducible.
 */
void InitParticles( Particle particles[], ParticleV pv[], int npart )
{
    int i;
    for (i = 0; i < npart; i++) {
        particles[i].x = Random();
        particles[i].y = Random();
        particles[i].z = Random();
        particles[i].mass = 1.0;
        pv[i].xold = particles[i].x;
        pv[i].yold = particles[i].y;
        pv[i].zold = particles[i].z;
        pv[i].fx = 0;
        pv[i].fy = 0;
        pv[i].fz = 0;
    }
}
/*
 * Accumulates 2D gravitational forces on each particle in myparticles[]
 * from all particles in others[] and returns the maximum force-to-rmin
 * ratio, used by the caller to adapt the time step.
 *
 * Fixes over the original:
 *  - the nested inner "parallel for" shared rx/ry/mj/r and the fx/fy
 *    accumulators across threads (data race, corrupted forces); the inner
 *    loop is now serial and its temporaries are loop-local;
 *  - max_f was updated without synchronization from the outer parallel
 *    loop (data race); it now uses an OpenMP max reduction.
 */
double ComputeForces(Particle myparticles[], Particle others[], ParticleV pv[], int npart)
{
    double max_f = 0.0;
    int i;
    #pragma omp parallel for schedule(static) reduction(max:max_f)
    for (i = 0; i < npart; i++) {
        double xi = myparticles[i].x;
        double yi = myparticles[i].y;
        double fx = 0.0;
        double fy = 0.0;
        double rmin = 100.0;   /* smallest squared distance seen so far */
        int j;
        for (j = 0; j < npart; j++) {
            double rx = xi - others[j].x;
            double ry = yi - others[j].y;
            double mj = others[j].mass;
            double r = rx * rx + ry * ry;
            /* ignore overlap and same particle */
            if (r == 0.0) continue;
            if (r < rmin) rmin = r;
            r = r * sqrt(r);   /* |r|^3 for the inverse-square law */
            fx -= mj * rx / r;
            fy -= mj * ry / r;
        }
        pv[i].fx += fx;
        pv[i].fy += fy;
        fx = sqrt(fx * fx + fy * fy) / rmin;
        if (fx > max_f) max_f = fx;
    }
    return max_f;
}
/*
 * Advances every particle one step with a variable-step Verlet-style
 * update, then adapts the (static) time step from max_f.
 * Returns the previous time step dt_old.
 */
double ComputeNewPos( Particle particles[], ParticleV pv[], int npart, double max_f)
{
    static double dt_old = 0.001, dt = 0.001;
    double c0, c1, c2;
    double candidate;
    int k;

    /* Integration coefficients for the two-step (dt, dt_old) update. */
    c0 = 2.0 / (dt * (dt + dt_old));
    c2 = 2.0 / (dt_old * (dt + dt_old));
    c1 = -(c0 + c2);

    /* Each particle update is independent, so the loop parallelizes safely. */
    #pragma omp parallel for schedule(static)
    for (k = 0; k < npart; k++) {
        double px = particles[k].x;
        double py = particles[k].y;
        particles[k].x = (pv[k].fx - c1 * px - c2 * pv[k].xold) / c0;
        particles[k].y = (pv[k].fy - c1 * py - c2 * pv[k].yold) / c0;
        pv[k].xold = px;
        pv[k].yold = py;
        pv[k].fx = 0;
        pv[k].fy = 0;
    }

    /* Candidate step from the strongest force, clamped from below. */
    candidate = 1.0 / sqrt(max_f);
    if (candidate < 1.0e-6)
        candidate = 1.0e-6;
    /* Shrink immediately; grow cautiously (doubling, only past 4*dt). */
    if (candidate < dt) {
        dt_old = dt;
        dt = candidate;
    } else if (candidate > 4.0 * dt) {
        dt_old = dt;
        dt *= 2.0;
    }
    return dt_old;
}
|
sparseraster.h | #pragma once
#include "gdx/cell.h"
#include "gdx/cpupredicates-private.h"
#include "gdx/exception.h"
#include "gdx/nodatapredicates-private.h"
#include "gdx/rasterchecks.h"
#include "gdx/rastermetadata.h"
#include "gdx/sparserasteriterator.h"
#include "infra/cast.h"
#include "infra/span.h"
#include <Eigen/SparseCore>
#include <algorithm>
#include <cassert>
#include <vector>
namespace gdx {
/**
 * Raster backed by an Eigen row-major sparse matrix.
 *
 * Representation: cells holding real data are stored as explicit non-zeros;
 * cells that are "nodata" are the matrix's implicit zeros. A nodata value in
 * the metadata is therefore mandatory (see throwOnInvalidMetadata).
 *
 * NOTE(review): several members below appear unfinished or inconsistent
 * (flagged inline); many are templates that only fail when instantiated.
 */
template <typename T>
class SparseRaster
{
public:
using value_type = T;
using size_type = int32_t;
using data_type = Eigen::SparseMatrix<T, Eigen::RowMajor>;
using nodata_type = std::optional<value_type>;
using pointer = T*;
using const_pointer = const T*;
using iterator = SparseMatrixIterator<T, false>;
using const_iterator = SparseMatrixIterator<T, true>;
static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN;
static constexpr T NaN = std::numeric_limits<T>::quiet_NaN();
static constexpr bool has_nan()
{
return raster_type_has_nan;
}
SparseRaster() = default;
// Size-only construction; metadata is built from the dimensions.
SparseRaster(int32_t rows, int32_t cols)
: _meta(rows, cols)
, _data(rows, cols)
{
}
SparseRaster(RasterMetadata meta)
: _meta(std::move(meta))
, _data(_meta.rows, _meta.cols)
{
throwOnInvalidMetadata();
}
SparseRaster(int32_t rows, int32_t cols, T fillValue)
: SparseRaster(RasterMetadata(rows, cols), fillValue)
{
}
SparseRaster(const RasterMetadata& meta, T fillValue)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
if constexpr (raster_type_has_nan) {
// make sure we fill tha raster with NaNs if the fill value is the nodata value
if (_meta.nodata.has_value() && fillValue == static_cast<T>(*_meta.nodata)) {
fillValue = NaN;
}
}
fill(fillValue);
}
SparseRaster(int32_t rows, int32_t cols, std::span<const T> data)
: SparseRaster(RasterMetadata(rows, cols), data)
{
}
// Build from a dense row-major buffer; nodata cells are left implicit.
SparseRaster(const RasterMetadata& meta, std::span<const T> data)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
throwOnInvalidMetadata();
throwOnDataSizeMismatch(meta.rows, meta.cols, data.size());
initMatrixValues(data);
}
SparseRaster(const RasterMetadata& meta, data_type&& data)
: _meta(meta)
, _data(data)
{
}
// Copying is allowed but considered a performance bug; use copy() instead.
SparseRaster(const SparseRaster<T>& other)
: _meta(other._meta)
, _data(other._data)
{
fmt::print("!! Raster copy: should not happen !!");
}
SparseRaster(SparseRaster<T>&&) = default;
SparseRaster& operator=(SparseRaster<T>&&) = default;
SparseRaster& operator=(const SparseRaster<T>& other)
{
if (this != &other) {
_meta = other._meta;
_data = other._data;
}
return *this;
}
void resize_and_fill(int32_t rows, int32_t cols, value_type value)
{
resize(rows, cols);
fill(value);
}
void resize(int32_t rows, int32_t cols)
{
_meta.rows = rows;
_meta.cols = cols;
_data.resize(rows, cols);
}
void resize(int32_t rows, int32_t cols, std::optional<double> nodata)
{
_meta.rows = rows;
_meta.cols = cols;
_meta.nodata = nodata;
_data.resize(rows, cols);
throwOnInvalidMetadata();
}
// Replace the metadata; dimensions must match the current cell count.
void set_metadata(RasterMetadata meta)
{
if (meta.rows * meta.cols != size()) {
throw InvalidArgument("Cannot change metadata: invalid size");
}
_meta = std::move(meta);
}
// Explicit deep copy (the copy constructor prints a warning).
SparseRaster<T> copy() const
{
SparseRaster<T> dst(_meta);
dst._data = _data;
return dst;
}
// Iterators over all cells; nodata() (or T's max when absent) marks empties.
iterator begin()
{
return iterator(_data, nodata().value_or(std::numeric_limits<T>::max()));
}
const_iterator begin() const
{
return const_iterator(_data, nodata().value_or(std::numeric_limits<T>::max()));
}
const_iterator cbegin() const
{
return begin();
}
iterator end()
{
return iterator();
}
const_iterator end() const
{
return const_iterator();
}
const_iterator cend() const
{
return end();
}
// Iterators over stored (data) values only, skipping nodata cells.
auto value_begin()
{
return SparseMatrixValueIterator<value_type, false>(_data);
}
auto value_begin() const
{
return SparseMatrixValueIterator<value_type, true>(_data);
}
auto value_end()
{
return SparseMatrixValueIterator<value_type, false>();
}
auto value_end() const
{
return SparseMatrixValueIterator<value_type, true>();
}
auto value_cend()
{
return SparseMatrixValueIterator<value_type, true>();
}
auto value_cbegin() const
{
return value_begin();
}
auto value_cend() const
{
return value_end();
}
// NOTE(review): non-zeros are DATA values in this representation (nodata is
// the implicit zero), so "nonZeros() != 0" reads as "has data", not "has
// nodata". Confirm the intended semantics -- this looks inverted.
bool has_nodata() const noexcept
{
return _data.nonZeros() != 0;
}
std::optional<T> nodata() const noexcept
{
return inf::optional_cast<T>(_meta.nodata);
}
std::size_t size() const noexcept
{
return _data.size();
}
std::ptrdiff_t ssize() const noexcept
{
assert(_data.size() <= std::numeric_limits<std::ptrdiff_t>::max());
return static_cast<std::ptrdiff_t>(_data.size());
}
void collapse_data()
{
// no collapse needed
}
const RasterMetadata& metadata() const noexcept
{
return _meta;
}
void set_projection(int32_t epsg)
{
_meta.set_projection_from_epsg(epsg);
}
// Change the nodata marker; NaN is only valid for floating-point rasters.
void set_nodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Nodata value cannot be NaN for integral rasters");
}
}
_meta.nodata = newValue;
}
/*void replaceNodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Integral rasters cannot have NaN values");
}
}
}*/
/*void turn_value_into_nodata(T value)
{
const auto dataSize = _data.size();
for (int i = 0; i < dataSize; ++i) {
if (_data(i) == value) {
mark_as_nodata(i);
}
}
}*/
// assigns the value to all the elements of the raster, even nodata
// NOTE(review): Eigen::SparseMatrix has no fill() member -- this will not
// compile when instantiated; probably needs setConstant-style logic.
void fill(value_type value)
{
// TODO
_data.fill(value);
}
// assigns the value to all the elements of the raster, leaving nodata values intact
void fill_values(value_type value)
{
_data.fill(value);
}
// Makes all elements of the raster nodata values
// (implicit zeros represent nodata, so clearing the matrix suffices)
void fill_with_nodata()
{
_data.setZero();
}
int32_t rows() const noexcept
{
assert(_meta.rows == _data.rows());
return _meta.rows;
}
int32_t cols() const noexcept
{
assert(_meta.cols == _data.cols());
return _meta.cols;
}
// mark_as_data is a no-op: assigning a value makes a cell "data" implicitly.
void mark_as_data(int32_t /*index*/) noexcept
{
}
void mark_as_data(Cell /*cell*/) noexcept
{
}
void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept
{
}
void mark_as_nodata(int32_t index)
{
auto [r, c] = indexToRowCol(index);
mark_as_nodata(r, c);
}
// Remove the stored coefficient so the cell falls back to implicit nodata.
// NOTE(review): prune visits every stored element -- O(nnz) per call.
void mark_as_nodata(int32_t row, int32_t col)
{
_data.prune([=](const int32_t& r, const int32_t& c, const T& /*value*/) {
return row == r && col == c;
});
}
// NOTE(review): _data.coeff() is called without arguments here; this will
// not compile when instantiated -- presumably coeff(row, col) for `index`.
std::optional<value_type> optional_value(int32_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<value_type>();
} else {
return _data.coeff();
}
}
// NOTE(review): _data(index) -- SparseMatrix has no linear operator();
// same compile problem as optional_value above if instantiated.
template <typename VarType>
std::optional<VarType> optional_value_as(int32_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<VarType>();
} else {
return static_cast<VarType>(_data(index));
}
}
bool is_nodata_value(T value) const noexcept
{
return value == nodata();
}
bool is_nodata(int32_t index) const noexcept
{
auto [row, col] = indexToRowCol(index);
return is_nodata(row, col);
}
bool is_nodata(const Cell& cell) const noexcept
{
return is_nodata(cell.r, cell.c);
}
// Binary-searches row r's stored range for column c; atInRange returns the
// nodata sentinel when the coefficient is absent.
// NOTE(review): a stored value equal to nodata() would also report true.
bool is_nodata(int32_t r, int32_t c) const noexcept
{
auto* innerNonZeros = _data.innerNonZeroPtr();
auto* outerIndex = _data.outerIndexPtr();
auto nod = nodata().value();
auto end = innerNonZeros ? outerIndex[r] + innerNonZeros[r] : outerIndex[r + 1];
return _data.data().atInRange(outerIndex[r], end, typename data_type::StorageIndex(c), nod) == nod;
}
// bool tolerant_equal_to(const SparseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept
// {
//     if (_meta != other._meta) {
//         return false;
//     }
//     return tolerant_data_equal_to(other, tolerance);
// }
// bool tolerant_data_equal_to(const SparseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept
// {
//     throw_on_size_mismatch(*this, other);
//     return _data == other._data;
// }
// NOTE(review): declared noexcept but throw_on_size_mismatch can throw,
// which would call std::terminate.
bool operator==(const SparseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return (_data - other._data).norm() == 0;
}
bool operator!=(const SparseRaster<T>& other) const noexcept
{
return !(*this == other);
}
/*SparseRaster<uint8_t> not_equals(const SparseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return performBinaryOperation<nodata::not_equal_to>(other);
}
template <typename TValue>
SparseRaster<uint8_t> not_equals(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return performUnaryOperation<nodata::not_equal_to>(value);
}*/
template <typename TOther>
auto operator+(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster(_meta, _data + other._data);
}
template <typename TValue>
auto operator+(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
SparseRaster rasterCopy = copy();
rasterCopy += value;
return rasterCopy;
}
SparseRaster<T>& operator+=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
std::for_each(value_begin(), value_end(), [=](T& cellValue) {
cellValue += static_cast<T>(value);
});
return *this;
}
template <typename TOther>
SparseRaster<T>& operator+=(const SparseRaster<TOther>& other)
{
_data += other._data;
return *this;
}
SparseRaster<T> operator-() const
{
if constexpr (std::is_unsigned_v<T>) {
throw RuntimeError("Minus operator applied to unsigned value");
} else {
return SparseRaster<T>(_meta, -_data);
}
}
template <typename TOther>
auto operator-(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster<T>(_meta, _data - other._data);
}
template <typename TValue>
auto operator-(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
SparseRaster rasterCopy = copy();
rasterCopy -= value;
return rasterCopy;
}
SparseRaster<T>& operator-=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
std::for_each(value_begin(), value_end(), [=](T& cellValue) {
cellValue -= static_cast<T>(value);
});
return *this;
}
// NOTE(review): _data * other._data is an Eigen MATRIX product, not an
// element-wise multiply; confirm whether cwiseProduct was intended.
template <typename TOther>
auto operator*(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster<T>(_meta, _data * other._data);
}
template <typename TValue>
auto operator*(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return SparseRaster(_meta, _data * static_cast<T>(value));
}
SparseRaster<T>& operator*=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
_data *= value;
return *this;
}
//template <typename TOther>
//auto operator/(const SparseRaster<TOther>& other) const
//{
//    throw_on_size_mismatch(*this, other);
//    using TResult = decltype(0.f * TOther()); // use float or double as result type
//    SparseRaster<TResult> result(_meta);
//    if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
//        result.set_nodata(*other.metadata().nodata);
//    }
//    if (!result.nodata().has_value()) {
//        result.set_nodata(std::numeric_limits<TResult>::quiet_NaN());
//    }
//    TResult nodata = result.nodata().value();
//    if constexpr (std::numeric_limits<TResult>::has_quiet_NaN) {
//        nodata = std::numeric_limits<TResult>::quiet_NaN();
//    }
//    auto operation = nodata::divides<TResult>(_meta.nodata, other.metadata().nodata);
//    for (int32_t i = 0; i < size(); ++i) {
//        auto v = other[i];
//        if (v == 0) {
//            result[i] = nodata;
//        } else {
//            if (is_nodata(i) || other.is_nodata(i)) {
//                result[i] = nodata;
//            } else {
//                result[i] = static_cast<TResult>(_data(i)) / other[i];
//            }
//        }
//    }
//    return result;
//}
template <typename TValue>
auto operator/(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
if (value == 0) {
throw InvalidArgument("Division by zero");
}
return SparseRaster(_meta, _data / static_cast<T>(value));
}
SparseRaster<T>& operator/=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
_data /= value;
return *this;
}
// Element access; the mutable overloads insert a coefficient if absent.
value_type& operator[](int32_t index)
{
auto [row, col] = indexToRowCol(index);
return (*this)(row, col);
}
value_type operator[](int32_t index) const
{
auto [row, col] = indexToRowCol(index);
return (*this)(row, col);
}
value_type& operator[](const Cell& cell)
{
return (*this)(cell.r, cell.c);
}
value_type operator[](const Cell& cell) const
{
return (*this)(cell.r, cell.c);
}
value_type& operator()(int32_t row, int32_t col)
{
return _data.coeffRef(row, col);
}
value_type operator()(int32_t row, int32_t col) const
{
return _data.coeff(row, col);
}
/*SparseRaster<uint8_t> operator!() const
{
return performUnaryOperation<nodata::logical_not>();
}
template <typename TOther>
SparseRaster<uint8_t> operator&&(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::logical_and>(other);
}
template <typename TOther>
SparseRaster<uint8_t> operator||(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::logical_or>(other);
}
template <typename TOther>
SparseRaster<uint8_t> operator>(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::greater>(other);
}
SparseRaster<uint8_t> operator>(T threshold) const
{
return performUnaryOperation<nodata::greater>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator>=(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::greater_equal>(other);
}
SparseRaster<uint8_t> operator>=(T threshold) const
{
return performUnaryOperation<nodata::greater_equal>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator<(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::less>(other);
}
SparseRaster<uint8_t> operator<(T threshold) const
{
return performUnaryOperation<nodata::less>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator<=(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::less_equal>(other);
}
SparseRaster<uint8_t> operator<=(T threshold) const
{
return performUnaryOperation<nodata::less_equal>(threshold);
}*/
void replace(T oldValue, T newValue) noexcept
{
std::replace(begin(), end(), oldValue, newValue);
}
std::string to_string() const
{
std::ostringstream ss;
ss << _data;
return ss.str();
}
private:
// Convert a linear (row-major) index into (row, col).
std::tuple<int32_t, int32_t> indexToRowCol(int32_t index) const
{
int row = index / inf::truncate<int>(_data.cols());
int col = index - (row * inf::truncate<int>(_data.cols()));
return {row, col};
}
// Populate the sparse matrix from a dense buffer, skipping nodata cells.
void initMatrixValues(std::span<const T> data)
{
assert(nodata().has_value());
const T nod = nodata().value();
std::vector<Eigen::Triplet<T>> tripletList;
for (int r = 0; r < _meta.rows; ++r) {
const int rowStart = r * _meta.cols;
for (int c = 0; c < _meta.cols; ++c) {
if (data[rowStart + c] != nod) {
tripletList.push_back(Eigen::Triplet<T>(r, c, data[rowStart + c]));
}
}
}
_data.setFromTriplets(tripletList.begin(), tripletList.end());
}
// Nodata is mandatory: it is the implicit-zero sentinel of the storage.
void throwOnInvalidMetadata()
{
if (!_meta.nodata.has_value()) {
throw RuntimeError("Sparse rasters must have a nodata value");
}
}
static void throwOnDataSizeMismatch(int32_t rows, int32_t cols, size_t dataSize)
{
if (static_cast<size_t>(rows * cols) != dataSize) {
throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols);
}
}
// Performs a unary operation on all the elements that results in true or false
// NOTE(review): uses _data(i); see optional_value_as note above.
template <template <typename> typename BinaryPredicate, typename TOther>
SparseRaster<uint8_t> perform_unary_operation(TOther value) const
{
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value()) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>());
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred(_data(i), static_cast<T>(value));
}
return result;
}
template <template <typename> typename UnaryPredicate>
SparseRaster<uint8_t> performUnaryOperation() const
{
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
std::transform(cbegin(), cend(), result.begin(), UnaryPredicate<T>(_meta.nodata));
return result;
}
// NOTE(review): result[i] inserts via coeffRef under "omp parallel for" --
// concurrent insertion into a sparse matrix is not thread-safe; verify.
template <template <typename> typename BinaryPredicate, typename TOther>
SparseRaster<uint8_t> perform_binary_operation(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
using WidestType = decltype(T() * TOther());
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) {
result.set_nodata(std::numeric_limits<uint8_t>::max());
}
auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata);
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i]));
}
return result;
}
template <template <typename> typename UnaryPredicate, typename TScalar>
auto perform_scalar_operation(TScalar scalar) const
{
using WidestType = decltype(T() * TScalar());
auto pred = UnaryPredicate<WidestType>(_meta.nodata, static_cast<WidestType>(scalar));
SparseRaster<WidestType> result(_meta);
std::transform(cbegin(), cend(), result.begin(), [this, pred](T value) {
if (is_nodata_value(value)) {
return value;
}
return pred(value);
});
return result;
}
template <template <typename> typename BinaryPredicate, typename TOther>
auto perform_raster_operation(const SparseRaster<TOther>& other) const
{
using WidestType = decltype(T() * TOther());
SparseRaster<WidestType> result(_meta);
if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
result.set_nodata(*other.metadata().nodata);
}
auto operation = BinaryPredicate<WidestType>();
auto nodata = result.nodata().value_or(0);
if constexpr (std::numeric_limits<WidestType>::has_quiet_NaN) {
nodata = std::numeric_limits<WidestType>::quiet_NaN();
}
#pragma omp parallel for
for (std::size_t i = 0; i < size(); ++i) {
if (is_nodata(i) || other.is_nodata(i)) {
result[i] = nodata;
} else {
result[i] = operation(static_cast<WidestType>(_data(i)), static_cast<WidestType>(other[i]));
}
}
return result;
}
RasterMetadata _meta;
data_type _data;
};
// scalar + raster: addition commutes, so delegate to the member operator
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
SparseRaster<T> operator+(TScalar lhs, const SparseRaster<T>& rhs)
{
    return rhs + lhs;
}
// scalar - raster: not commutative; apply "scalar minus cell" per element,
// leaving nodata cells untouched via the nodata-aware predicate
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator-(TScalar value, const SparseRaster<T>& rhs)
{
    using ResultType = decltype(TScalar() - T());
    SparseRaster<ResultType> result(rhs.metadata());
    auto op = nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value));
    std::transform(begin(rhs), end(rhs), begin(result), op);
    return result;
}
// scalar * raster: multiplication commutes, so delegate to the member operator
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
SparseRaster<T> operator*(TScalar lhs, const SparseRaster<T>& rhs)
{
    return rhs * lhs;
}
/**
 * Divides a scalar by every cell of the raster (scalar / raster).
 *
 * Cells containing 0 become nodata in the result; if the result raster has
 * no nodata value, the division cannot be represented and an
 * InvalidArgument is thrown. (Dead commented-out draft code removed.)
 */
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator/(TScalar scalar, const SparseRaster<T>& rhs)
{
    using ResultType = decltype(1.0f * T()); // promote to floating point
    static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
    SparseRaster<ResultType> result(rhs.metadata());
    for (std::size_t i = 0; i < rhs.size(); ++i) {
        auto value = rhs[i];
        if (value == 0) {
            if (!result.nodata().has_value()) {
                throw InvalidArgument("Division by raster that contains 0 values");
            }
            result.mark_as_nodata(i);
        } else {
            result[i] = scalar / static_cast<ResultType>(value);
        }
    }
    return result;
}
// Free-function iterator/size access for SparseRaster, enabling ADL and use
// with generic algorithms and range-based code.
template <typename T>
auto cbegin(const SparseRaster<T>& ras)
{
    // Fixed: the original returned ras.data(), which is not a member of
    // SparseRaster; the const begin iterator is obtained via cbegin().
    return ras.cbegin();
}
template <typename T>
auto cend(const SparseRaster<T>& ras)
{
    return ras.cend();
}
template <typename T>
auto begin(SparseRaster<T>& ras)
{
    return ras.begin();
}
template <typename T>
auto begin(const SparseRaster<T>& ras)
{
    return ras.begin();
}
template <typename T>
auto end(SparseRaster<T>& ras)
{
    return ras.end();
}
template <typename T>
auto end(const SparseRaster<T>& ras)
{
    return ras.cend();
}
template <typename T>
auto size(const SparseRaster<T>& ras)
{
    return ras.size();
}
// Value iterators skip nodata cells and visit stored data only.
template <typename T>
auto value_cbegin(const SparseRaster<T>& ras)
{
    // Fixed: the original returned ras.value_data(), which is not a member
    // of SparseRaster; the const value iterator comes from value_begin()
    // (the const overload returns a const iterator).
    return ras.value_begin();
}
template <typename T>
auto value_cend(const SparseRaster<T>& ras)
{
    return ras.value_cend();
}
template <typename T>
auto value_begin(SparseRaster<T>& ras)
{
    return ras.value_begin();
}
template <typename T>
auto value_begin(const SparseRaster<T>& ras)
{
    return ras.value_begin();
}
template <typename T>
auto value_end(SparseRaster<T>& ras)
{
    return ras.value_end();
}
template <typename T>
auto value_end(const SparseRaster<T>& ras)
{
    return ras.value_cend();
}
}
|
spmm_x_csc.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <stdbool.h>
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Sparse matrix-matrix product C = A * B for CSC matrices.
 *
 * Two-pass algorithm: pass 1 counts the structural non-zeros per column of
 * C, pass 2 accumulates the values into a dense per-column workspace and
 * compacts them into the output.
 *
 * Fixes over the original:
 *  - removed the "omp parallel for" over pass 1: the read-modify-write of
 *    flag[] across threads was a data race (could overcount nnz and is UB);
 *  - removed a duplicated assignment of bv;
 *  - the occupancy flags are allocated once and reused per column instead
 *    of being allocated/freed inside the pass-2 loop.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSC *A, const ALPHA_SPMAT_CSC *B, ALPHA_SPMAT_CSC **matC)
{
    check_return(A->cols != B->rows, ALPHA_SPARSE_STATUS_INVALID_VALUE);
    ALPHA_SPMAT_CSC *mat = alpha_malloc(sizeof(ALPHA_SPMAT_CSC));
    *matC = mat;
    mat->rows = A->rows;
    mat->cols = B->cols;
    ALPHA_INT m = A->rows;
    ALPHA_INT n = B->cols;
    bool *flag = alpha_memalign(sizeof(bool) * m, DEFAULT_ALIGNMENT);
    ALPHA_INT nnz = 0;
    /* Pass 1: count structural non-zeros of C, one output column at a time.
     * Must stay serial: flag[] carries a loop dependency. */
    for (ALPHA_INT bc = 0; bc < n; bc++)
    {
        memset(flag, '\0', sizeof(bool) * m);
        for (ALPHA_INT bi = B->cols_start[bc]; bi < B->cols_end[bc]; bi++)
        {
            ALPHA_INT ac = B->row_indx[bi];
            for (ALPHA_INT ai = A->cols_start[ac]; ai < A->cols_end[ac]; ai++)
            {
                if (!flag[A->row_indx[ai]])
                {
                    nnz += 1;
                    flag[A->row_indx[ai]] = true;
                }
            }
        }
    }
    /* cols_end aliases cols_start+1 so the two arrays share one buffer. */
    ALPHA_INT *col_offset = alpha_memalign(sizeof(ALPHA_INT) * (n + 1), DEFAULT_ALIGNMENT);
    mat->cols_start = col_offset;
    mat->cols_end = col_offset + 1;
    mat->row_indx = alpha_memalign(nnz * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    mat->values = alpha_memalign(nnz * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
    ALPHA_Number *values = alpha_memalign(sizeof(ALPHA_Number) * m, DEFAULT_ALIGNMENT);
    ALPHA_INT index = 0;
    mat->cols_start[0] = 0;
    /* Pass 2: accumulate each output column into the dense workspace, then
     * compact the occupied rows into the CSC arrays. */
    for (ALPHA_INT bc = 0; bc < n; bc++)
    {
        memset(values, '\0', sizeof(ALPHA_Number) * m);
        memset(flag, '\0', sizeof(bool) * m);
        for (ALPHA_INT bi = B->cols_start[bc]; bi < B->cols_end[bc]; bi++)
        {
            ALPHA_INT ac = B->row_indx[bi];
            ALPHA_Number bv = B->values[bi];
            for (ALPHA_INT ai = A->cols_start[ac]; ai < A->cols_end[ac]; ai++)
            {
                ALPHA_INT ar = A->row_indx[ai];
                ALPHA_Number tmp;
                alpha_mul(tmp, bv, A->values[ai]);
                alpha_adde(values[ar], tmp);
                flag[ar] = true;
            }
        }
        for (ALPHA_INT r = 0; r < m; r++)
        {
            if (flag[r])
            {
                mat->row_indx[index] = r;
                mat->values[index] = values[r];
                index += 1;
            }
        }
        mat->cols_end[bc] = index;
    }
    alpha_free(flag);
    alpha_free(values);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#include <loops/ReduceType.h>
#define MIN_V 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#define no_op_exec_special_accumulation_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
#endif
namespace functions {
    namespace indexreduce {
        // Pair of (value, flat index) used by index-reduction kernels (argmax etc.).
        template <typename T>
        struct IndexValue {
            T value;
            Nd4jLong index;
            _CUDA_HD IndexValue() = default;
            // Initializer list follows member declaration order (value, then index);
            // the old order (index, value) triggered -Wreorder and obscured intent.
            _CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
        };
    }
    namespace summarystats {
        // Forward declaration; full definition lives in summarystatsreduce.h.
        template <typename T>
        class SummaryStatsData;
    }
}
namespace simdOps {
// Pairwise addition: z = x + y, cast to the output type.
template <typename X, typename Y, typename Z>
class Add {
public:
    op_def static Z op(X d1, Y d2) {
        auto sum = d1 + d2;
        return static_cast<Z>(sum);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto sum = d1 + d2;
        return static_cast<Z>(sum);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: second operand arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto sum = d1 + params[0];
        return static_cast<Z>(sum);
    }
    // Neutral element of addition.
    op_def static X startingValue() {
        return static_cast<X>(0.f);
    }
};
// Pairwise addition whose result stays in the first operand's type.
template <typename X, typename Y>
class NewAdd {
public:
    op_def static X op(X d1, Y d2, X *params) {
        auto total = d1 + d2;
        return total;
    }
};
// Pairwise subtraction: z = x - y, cast to the output type.
template <typename X, typename Y, typename Z>
class Subtract {
public:
    op_def static Z op(X d1, Y d2) {
        auto diff = d1 - d2;
        return static_cast<Z>(diff);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto diff = d1 - d2;
        return static_cast<Z>(diff);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: subtrahend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto diff = d1 - params[0];
        return static_cast<Z>(diff);
    }
};
// Squared difference: z = (x - y)^2.
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
    op_def static Z op(X d1, Y d2) {
        auto diff = static_cast<Z>(d1 - d2);
        return diff * diff;
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto diff = static_cast<Z>(d1 - d2);
        return diff * diff;
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: subtrahend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto diff = static_cast<Z>(d1 - params[0]);
        return diff * diff;
    }
};
// Squared reversed difference: z = (y - x)^2.
template <typename X, typename Y, typename Z>
class SquaredReverseSubtract {
public:
    op_def static Z op(X d1, Y d2) {
        auto diff = static_cast<Z>(d2 - d1);
        return diff * diff;
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto diff = static_cast<Z>(d2 - d1);
        return diff * diff;
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: minuend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto diff = static_cast<Z>(params[0] - d1);
        return diff * diff;
    }
};
// Reversed subtraction: z = y - x.
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
    op_def static Z op(X d1, Y d2) {
        auto diff = d2 - d1;
        return static_cast<Z>(diff);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto diff = d2 - d1;
        return static_cast<Z>(diff);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: minuend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto diff = params[0] - d1;
        return static_cast<Z>(diff);
    }
};
// Full log-Poisson loss: exp(c) - z*c + Stirling approximation of log(z!)
// (z*log(z) - z + 0.5*log(2*pi*z)). z is the target count, c the log-prediction.
template <typename X, typename Y, typename Z>
class LogPoissonLossFull {
public:
    op_def static Z op(X z, Y c) {
        auto zz = static_cast<Z>(z);
        auto zc = static_cast<Z>(c);
        return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
    }
    op_def static Z op(X z, Y c, Z *params) {
        auto zz = static_cast<Z>(z);
        auto zc = static_cast<Z>(c);
        return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
    }
    // Unary form: only the Stirling term (no prediction available).
    // NOTE(review): nd4j_log is instantiated with <Y, Z> here although z has
    // type X — presumably a copy-paste slip that compiles when X and Y agree;
    // confirm before changing.
    op_def static Z op(X z) {
        auto zz = static_cast<Z>(z);
        return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
    }
    // op for MetaOps; prediction arrives via params[0]. Returns X, unlike the
    // other overloads which return Z — kept as-is for ABI stability.
    op_def static X op(X z, Y *params) {
        return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
    }
};
// Log-Poisson loss without the constant log(z!) term: exp(c) - z*c,
// where z is the target count and c the log-prediction.
template <typename X, typename Y, typename Z>
class LogPoissonLoss {
public:
    op_def static Z op(X z, Y c) {
        auto zz = static_cast<Z>(z);
        auto zc = static_cast<Z>(c);
        return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
    }
    op_def static Z op(X z, Y c, Z *params) {
        auto zz = static_cast<Z>(z);
        auto zc = static_cast<Z>(c);
        return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
    }
    // Unary form: identity cast (loss degenerates without a prediction).
    op_def static Z op(X z) {
        return static_cast<Z>(z);
    }
    // op for MetaOps; prediction arrives via params[0].
    op_def static Z op(X z, Y *params) {
        return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
    }
};
// Pairwise multiplication: z = x * y.
template <typename X, typename Y, typename Z>
class Multiply {
public:
    op_def static Z op(X d1, Y d2) {
        auto product = d1 * d2;
        return static_cast<Z>(product);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto product = d1 * d2;
        return static_cast<Z>(product);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: multiplier arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto product = d1 * params[0];
        return static_cast<Z>(product);
    }
    // Neutral element of multiplication.
    op_def static X startingValue() {
        return static_cast<X>(1.f);
    }
};
// Pairwise division: z = x / y (no zero-divisor guard; see SafeDivide).
template <typename X, typename Y, typename Z>
class Divide {
public:
    op_def static Z op(X d1, Y d2) {
        auto quotient = d1 / d2;
        return static_cast<Z>(quotient);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto quotient = d1 / d2;
        return static_cast<Z>(quotient);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto quotient = d1 / params[0];
        return static_cast<Z>(quotient);
    }
    // Neutral element of division.
    op_def static X startingValue() {
        return static_cast<X>(1);
    }
};
// Division that yields 0 instead of dividing by a zero denominator.
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
    op_def static Z op(X d1, Y d2) {
        return d2 == static_cast<Y>(0)
               ? static_cast<Z>(0)
               : static_cast<Z>(d1 / d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return d2 == static_cast<Y>(0)
               ? static_cast<Z>(0)
               : static_cast<Z>(d1 / d2);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return params[0] == static_cast<Y>(0)
               ? static_cast<Z>(0)
               : static_cast<Z>(d1 / params[0]);
    }
};
// Division rounded toward negative infinity: z = floor(x / y).
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
    op_def static Z op(X d1, Y d2) {
        auto quotient = static_cast<Z>(d1 / d2);
        return nd4j::math::nd4j_floor<Z,Z>(quotient);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto quotient = static_cast<Z>(d1 / d2);
        return nd4j::math::nd4j_floor<Z,Z>(quotient);
    }
    // Unary form: floor of the cast value.
    op_def static Z op(X d1) {
        return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto quotient = static_cast<Z>(d1 / params[0]);
        return nd4j::math::nd4j_floor<Z,Z>(quotient);
    }
};
// Integer division after truncating both operands to int
// (rounds toward zero; no zero-divisor guard).
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(d2);
        return static_cast<Z>(lhs / rhs);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(d2);
        return static_cast<Z>(lhs / rhs);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(params[0]);
        return static_cast<Z>(lhs / rhs);
    }
};
// Integer remainder after truncating both operands to int
// (sign follows the dividend; no zero-divisor guard).
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(d2);
        return static_cast<Z>(lhs % rhs);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(d2);
        return static_cast<Z>(lhs % rhs);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(params[0]);
        return static_cast<Z>(lhs % rhs);
    }
};
// IEEE-style remainder, delegated to nd4j_remainder.
template<typename X, typename Y, typename Z>
class Remainder {
public:
    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};
// Floating-point modulo (truncated), delegated to nd4j_fmod.
template <typename X, typename Y, typename Z>
class FMod {
public:
    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // MetaOps form: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};
// Floored modulo: result has the sign of the divisor (Python-style %),
// built on top of truncated fmod by shifting when operand signs differ.
template <typename X, typename Y, typename Z>
class FloorMod {
public:
    op_def static Z op(X d1, Y d2) {
        auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
        // Same signs: truncated and floored modulo agree; otherwise re-fold m + d2.
        return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
        return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps; divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};
// Reversed division: z = y / x.
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
    op_def static Z op(X d1, Y d2) {
        auto quotient = d2 / d1;
        return static_cast<Z>(quotient);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto quotient = d2 / d1;
        return static_cast<Z>(quotient);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    // MetaOps form: dividend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        auto quotient = params[0] / d1;
        return static_cast<Z>(quotient);
    }
};
// Pairwise copy: the second operand wins; unary forms pass the first through.
template <typename X, typename Y, typename Z>
class CopyPws {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d2);
    }
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(d1);
    }
};
// Identity transform: output equals input, same type.
template <typename X>
class Copy {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        return d1;
    }
};
// Three-type variant of CopyPws: second operand wins in binary forms.
template <typename X, typename Y, typename Z>
class Copy2 {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return static_cast<Z>(d2);
    }
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(d1);
    }
};
// BLAS-style axpy: z = alpha*x + y, with alpha read from params[0].
template <typename X, typename Y, typename Z>
class Axpy {
public:
    // Two-arg form has no params, so alpha is implicitly 1: z = y + x.
    op_def static Z op(X d1, Y d2) {
        return static_cast<Z>(d2 + d1);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto alpha = params[0];
        return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
    }
    // Unary form: identity cast.
    op_def static Z op(X d1) {
        return static_cast<Z>(d1);
    }
};
// Type-converting assignment: output is the input cast to Z.
template <typename X, typename Z>
class Assign {
public:
    no_op_exec_special_any
    no_op_exec_special_any_cuda
    op_def static Z op(X d1, X *params) {
        return static_cast<Z>(d1);
    }
};
// Logical AND. With params, values are "true" when != params[0] (comparand);
// without params, plain boolean truthiness is used.
template <typename X, typename Z>
class And {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda
    // NOTE(review): the no-params two-arg form sums the operands instead of
    // AND-ing them — presumably downstream treats any nonzero as true; confirm.
    op_def static Z op(X d1, X d2) {
        return d2 + d1;
    }
    op_def static Z op(X d1, X d2, X *params) {
        if (params != nullptr) {
            auto comp = params[0];
            return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
        } else {
            auto b1 = static_cast<bool>(d1);
            auto b2 = static_cast<bool>(d2);
            return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value, matching Or; not a real result.
    op_def static Z op(X d1, X *params) {
        return static_cast<Z>(119);
    }
};
// Bitwise OR of two integers.
template <typename X>
class IntOr {
public:
    op_def static X op(X d1, X d2) {
        return d1 | d2;
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Bitwise AND of two integers.
template <typename X>
class IntAnd {
public:
    op_def static X op(X d1, X d2) {
        return d1 & d2;
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Bitwise XOR of two integers.
template <typename X>
class IntXor {
public:
    op_def static X op(X d1, X d2) {
        return d1 ^ d2;
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Left shift: d1 << d2 (no range check on d2).
template <typename X>
class ShiftLeft {
public:
    op_def static X op(X d1, X d2) {
        return d1 << d2;
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Right shift: d1 >> d2 (no range check on d2).
template <typename X>
class ShiftRight {
public:
    op_def static X op(X d1, X d2) {
        return d1 >> d2;
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Bit rotation left by d2 positions.
template <typename X>
class CyclicShiftLeft {
public:
    op_def static X op(X d1, X d2) {
        // Reduce the shift count modulo the bit width first: the old
        // expression computed d1 >> (width - d2), which shifts by the full
        // width when d2 == 0 — undefined behavior (CERT INT34-C).
        const X bits = static_cast<X>(sizeof(X) * 8);
        X s = d2 % bits;
        if (s == static_cast<X>(0))
            return d1;
        return d1 << s | d1 >> (bits - s);
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Bit rotation right by d2 positions.
template <typename X>
class CyclicShiftRight {
public:
    op_def static X op(X d1, X d2) {
        // Reduce the shift count modulo the bit width first: the old
        // expression computed d1 << (width - d2), which shifts by the full
        // width when d2 == 0 — undefined behavior (CERT INT34-C).
        const X bits = static_cast<X>(sizeof(X) * 8);
        X s = d2 % bits;
        if (s == static_cast<X>(0))
            return d1;
        return d1 >> s | d1 << (bits - s);
    }
    op_def static X op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
};
// Logical OR. With params, values are "true" when != params[0] (comparand);
// without params, plain boolean truthiness is used.
template <typename X, typename Z>
class Or {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda
    // NOTE(review): the no-params two-arg form sums the operands instead of
    // OR-ing them — presumably downstream treats any nonzero as true; confirm.
    op_def static Z op(X d1, X d2) {
        return d2 + d1;
    }
    op_def static Z op(X d1, X d2, X *params) {
        if (params != nullptr) {
            auto comp = params[0];
            return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
        } else {
            auto b1 = static_cast<bool>(d1);
            auto b2 = static_cast<bool>(d2);
            return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value, matching And; not a real result.
    op_def static Z op(X d1, X *params) {
        return static_cast<Z>(119);
    }
};
// Logical XOR. With params, values are "true" when != params[0] (comparand);
// without params, plain boolean truthiness is used.
template <typename X, typename Z>
class Xor {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda
    // NOTE(review): the no-params two-arg form sums the operands instead of
    // XOR-ing them, mirroring And/Or — confirm downstream expectation.
    op_def static Z op(X d1, X d2) {
        return d2 + d1;
    }
    op_def static Z op(X d1, X d2, X *params) {
        if (params != nullptr) {
            auto comp = params[0];
            return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
        } else {
            auto b1 = static_cast<bool>(d1);
            auto b2 = static_cast<bool>(d2);
            return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
};
// Logical NOT. The pairwise-with-params form doubles as "not equal".
template <typename X, typename Z>
class Not {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda
    // No-params pairwise form always yields 0 (kept for interface parity).
    op_def static Z op(X d1, X d2) {
        return static_cast<Z>(0);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
    }
    // this transform op should run only on boolean input
    op_def static Z op(X d1, X *params) {
        auto b1 = static_cast<bool>(d1);
        return !b1;
    }
};
// Logical NAND over truncated-to-int operands: 1 when not (d1 && d2).
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
    op_def static Z op(X d1, Y d2) {
        return !((int) d1 && (int) d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        // Fixed: the result was cast to X although the return type is Z.
        return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value used across the logical ops; the cast
    // now targets Z (was X), matching the declared return type.
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};
// Logical XOR over truncated-to-int operands: (a|b) with the common bits removed.
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
    op_def static Z op(X d1, Y d2) {
        auto a = static_cast<int>(d1);
        auto b = static_cast<int>(d2);
        return (a | b) & ~(a & b);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value shared by the logical ops.
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};
// Logical AND over truncated-to-int operands (bitwise & of 0/1 truth values).
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d1) & static_cast<int>(d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through. Parameter type corrected from Y to X for
    // consistency with every sibling op class (LogicalOr, LogicalXor, ...).
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value shared by the logical ops.
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};
// Logical OR over truncated-to-int operands (bitwise | of truth values).
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
    op_def static Z op(X d1, Y d2) {
        return static_cast<int>(d1) | static_cast<int>(d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
    // op for MetaOps — sentinel value shared by the logical ops; the cast
    // now targets Z (was X), matching the declared return type.
    op_def static Z op(X d1, Y *params) {
        return static_cast<Z>(119);
    }
};
// Truncated integer modulo over int-converted operands.
template <typename X, typename Y, typename Z>
class Mod {
public:
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<int>(d1);
        auto rhs = static_cast<int>(d2);
        return lhs % rhs;
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // op for MetaOp: divisor arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};
// Truncated integer modulo with swapped operands: z = y % x.
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
    op_def static Z op(X d1, Y d2) {
        auto lhs = static_cast<int>(d2);
        auto rhs = static_cast<int>(d1);
        return lhs % rhs;
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // op for MetaOp: dividend arrives via params[0].
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};
/**
 * Epsilon equality: 1 when |d1 - d2| <= MIN_V, else 0.
 */
template <typename X, typename Z>
class Epsilon {
public:
    op_def static Z op(X d1, X d2) {
        X gap = nd4j::math::nd4j_abs<X>(d1 - d2);
        return gap <= static_cast<X>(MIN_V) ? static_cast<Z>(1) : static_cast<Z>(0);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise equality predicate.
template <typename X, typename Z>
class EqualTo {
public:
    op_def static Z op(X d1, X d2) {
        const bool equal = (d1 == d2);
        return static_cast<Z>(equal);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise inequality predicate.
template <typename X, typename Z>
class NotEqualTo {
public:
    op_def static Z op(X d1, X d2) {
        const bool different = (d1 != d2);
        return static_cast<Z>(different);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise >= predicate.
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
    op_def static Z op(X d1, X d2) {
        const bool holds = (d1 >= d2);
        return static_cast<Z>(holds);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // FIXME: this signature clashes with MetaOp stuff
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise > predicate.
template <typename X, typename Z>
class GreaterThan {
public:
    op_def static Z op(X d1, X d2) {
        const bool holds = (d1 > d2);
        return static_cast<Z>(holds);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // FIXME: this signature clashes with MetaOp stuff
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise < predicate.
template <typename X, typename Z>
class LessThan {
public:
    op_def static Z op(X d1, X d2) {
        const bool holds = (d1 < d2);
        return static_cast<Z>(holds);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise <= predicate.
template <typename X, typename Z>
class LessThanOrEqual {
public:
    op_def static Z op(X d1, X d2) {
        const bool holds = (d1 <= d2);
        return static_cast<Z>(holds);
    }
    op_def static Z op(X d1, X d2, X *params) {
        return op(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1, X *params) {
        return d1;
    }
};
// Elementwise absolute value.
template <typename X>
class Abs {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto magnitude = nd4j::math::nd4j_abs<X>(d1);
        return magnitude;
    }
};
// Elementwise ceiling (round toward +infinity).
template <typename X>
class Ceiling {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto rounded = nd4j::math::nd4j_ceil<X,X>(d1);
        return rounded;
    }
};
// Elementwise cosine.
template <typename X>
class Cosine {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto result = nd4j::math::nd4j_cos<X,X>(d1);
        return result;
    }
};
// Elementwise natural exponential.
template <typename X>
class Exp {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto result = nd4j::math::nd4j_exp<X, X>(d1);
        return result;
    }
};
// Derivative of hard tanh: 1 inside [-1, 1], 0 outside.
template <typename X>
class HardTanhDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        const bool inside = d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f);
        return inside ? static_cast<X>(1.f) : static_cast<X>(0.f);
    }
};
// Hard tanh: clamp the input to [-1, 1].
template <typename X>
class HardTanh {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        const X lo = static_cast<X>(-1);
        const X hi = static_cast<X>(1);
        return d1 < lo ? lo : (d1 > hi ? hi : d1);
    }
};
// Elementwise floor (round toward -infinity).
template <typename X>
class Floor {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto rounded = nd4j::math::nd4j_floor<X,X>(d1);
        return rounded;
    }
};
// Elementwise natural logarithm.
template <typename X>
class Log {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto result = nd4j::math::nd4j_log<X, X>(d1);
        return result;
    }
};
// Elementwise log(1 + x).
template <typename X>
class Log1p {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto shifted = 1 + d1;
        return nd4j::math::nd4j_log<X, X>(shifted);
    }
};
// Logarithm of d1 in base d2, via the change-of-base identity.
template <typename X, typename Y, typename Z>
class LogX {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        auto numerator = nd4j::math::nd4j_log<X, Z>(d1);
        auto denominator = nd4j::math::nd4j_log<Y, Z>(d2);
        return numerator / denominator;
    }
};
// Clamp non-positive values up to the smallest positive float16, so later
// math (e.g. log) stays finite.
template <typename X>
class StabilizeFP16 {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0)
               ? d1
               : static_cast<X>(nd4j::DataTypeUtils::min<float16>());
    }
};
// Clamp non-positive values up to the smallest positive value of X.
template <typename X>
class StabilizeX {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0)
               ? d1
               : nd4j::DataTypeUtils::min<X>();
    }
};
// Sigmoid-style derivative form: x * (1 - x).
template <typename X>
class SpecialDerivative {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto complement = static_cast<X>(1.f) - d1;
        return d1 * complement;
    }
};
// Elementwise negation.
template <typename X>
class Neg {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        return -d1;
    }
};
// Elementwise Gauss error function.
template <typename X>
class Erf {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto result = nd4j::math::nd4j_erf<X,X>(d1);
        return result;
    }
};
// Elementwise complementary error function.
template <typename X>
class Erfc {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto result = nd4j::math::nd4j_erfc<X,X>(d1);
        return result;
    }
};
// Elementwise reciprocal: 1 / x.
template <typename X>
class Reciprocal {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        const X one = static_cast<X>(1);
        return one / d1;
    }
};
// Elementwise square via pow(x, 2).
template <typename X, typename Z>
class Sqr {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    op_def static Z op(X d1, Z *params) {
        const X two = static_cast<X>(2);
        return nd4j::math::nd4j_pow<X, X, Z>(d1, two);
    }
    op_def static Z op(X d1) {
        const X two = static_cast<X>(2);
        return nd4j::math::nd4j_pow<X, X, Z>(d1, two);
    }
};
// Relative error between two values, delegated to nd4j_re.
template <typename X, typename Y, typename Z>
class RelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_re<X>(d1, d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }
    // Unary form: no second value, so the error is zero by definition.
    op_def static Z op(X d1) {
        return static_cast<Z>(0);
    }
};
// Thresholded relative error: 1 when relative error exceeds params[0], else 0.
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    op_def static Z op(X d1, Y d2, Z *params) {
        const X threshold = params[0];
        const bool exceeds = nd4j::math::nd4j_re<X>(d1, d2) > threshold;
        return exceeds ? static_cast<Z>(1) : static_cast<Z>(0);
    }
    // Unary form: no second value, so nothing exceeds the threshold.
    op_def static Z op(X d1) {
        return static_cast<Z>(0);
    }
};
// Combined tolerance check: flags (returns 1) only when the relative error
// exceeds its threshold AND the absolute difference also exceeds its
// threshold; otherwise 0.
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // Unary/params form: comparand and both thresholds come from params[0..2].
    op_def static Z op(X d1, X *params) {
        X d2 = params[0];
        X thresholdRelative = params[1];
        X thresholdAbsolute = params[2];
        return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
    }
    // Pairwise form: thresholds come from params[0..1].
    op_def static Z op(X d1, Y d2, Z *params) {
        X thresholdRelative = params[0];
        X thresholdAbsolute = params[1];
        return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
    }
    // Unary form without params: nothing to compare against.
    op_def static Z op(X d1) {
        return static_cast<Z>(0);
    }
};
// Power with swapped operands: z = d2 ^ d1.
template <typename X, typename Y, typename Z>
class ReversePow {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // Scalar base arrives via params[0].
    op_def static Z op(X d1, Z *params) {
        return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1);
    }
    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
};
// Power: z = d1 ^ d2.
template <typename X, typename Y, typename Z>
class Pow {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // Scalar exponent arrives via params[0].
    op_def static Z op(X d1, Z *params) {
        return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
    }
    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
};
// Derivative of pow w.r.t. the base: n * x^(n-1).
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
    no_op_exec_special
    no_op_exec_special_cuda
    // Scalar exponent arrives via params[0].
    op_def static Z op(X d1, Z *params) {
        auto reduced = static_cast<Z>(params[0]) - static_cast<Z>(1.f);
        return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, reduced);
    }
    op_def static Z op(X d1, Y d2) {
        auto reduced = static_cast<Z>(d2) - static_cast<Z>(1.f);
        return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, reduced);
    }
    op_def static Z op(X d1, Y d2, Z *params) {
        auto reduced = static_cast<Z>(d2) - static_cast<Z>(1.f);
        return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, reduced);
    }
    // Unary form: pass-through.
    op_def static Z op(X d1) {
        return d1;
    }
};
// Elementwise rounding to nearest.
template <typename X>
class Round {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda
    op_def static X op(X d1, X *params) {
        auto rounded = nd4j::math::nd4j_round<X,X>(d1);
        return rounded;
    }
};
// NaN indicator: 1 per NaN element. As a reduction it counts NaNs by summing
// the indicators.
template <typename X, typename Z>
class IsNan {
public:
    no_op_exec_special_bool
    no_op_exec_special_bool_cuda
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda
    op_def static Z op(X d1, X *params) {
        return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
    }
    // Reduction starts at zero matches.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }
    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }
    op_def static Z update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }
    // Final count needs no normalization.
    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
// Expm1: exp(x) - 1.
// NOTE(review): computed as nd4j_exp(x) - 1, not a dedicated expm1 — loses
// precision for x near 0; confirm whether that is acceptable here.
template <typename X>
class Expm1 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
}
};
// IsPositive: per-element flag — 1 when x > 0, 0 otherwise.
// As an accumulation, update/merge sum the flags.
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// IsInf: per-element flag — 1 when the element is +/-infinity, 0 otherwise.
// As an accumulation, update/merge sum the flags.
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// IsInfOrNan: per-element flag — 1 when the element is NOT finite (inf or NaN).
// merge/update OR-combine: the reduction is 1 if any contribution was nonzero.
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction != static_cast<X>(0);
}
};
// IsFinite: per-element flag — 1 when the element is finite (not inf, not NaN).
// merge/update AND-combine: the reduction is 0 as soon as any element was 0.
// startingValue is 1 accordingly (the AND identity).
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction != static_cast<X>(0);
}
};
// ClipByValue: clamp each element into [params[0], params[1]]
// (params[0] = lower bound, params[1] = upper bound).
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
// LstmClip: symmetric clipping used for LSTM cell state — clamp d1 into
// [-d2, d2], where d2 is the clipping threshold.
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
// Swish activation: x * sigmoid(x).
template <typename X>
class Swish {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
}
};
// GELU, sigmoid approximation: x * sigmoid(1.702 * x).
// The 1.702 constant is from the GELU paper (Hendrycks & Gimpel).
template <typename X>
class GELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1);
}
};
// PreciseGELU: tanh-based "precise" GELU (Hendrycks & Gimpel, 2016):
// 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) ))
template <typename X>
class PreciseGELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
// fix: the cubic term is 0.044715 * x^3; the previous code computed
// (0.044715 * x)^3, i.e. it cubed the coefficient as well
auto xp = d1 + static_cast<X>(0.044715) * nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(3));
return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + nd4j::math::nd4j_tanh<X, X>(sp * xp));
}
};
// GELUDerivative: derivative of the sigmoid-approximated GELU (x * sigmoid(1.702 x)).
template <typename X>
class GELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x17 = static_cast<X>(1.702f) * d1;
auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
// (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
}
};
// PreciseGELUDerivative: derivative of the tanh-based GELU:
// 0.5 + (0.398942 x + 0.0535161 x^3) * Sech[0.797885 x + 0.0356774 x^3]^2
//     + 0.5 * Tanh[0.797885 x + 0.0356774 x^3]
template <typename X>
class PreciseGELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto x79 = static_cast<X>(0.797885) * d1;
// fix: cubic terms are coeff * x^3; the previous code computed (coeff * x)^3,
// cubing the coefficients (matches the fix in PreciseGELU)
auto x03 = static_cast<X>(0.0356774) * nd4j::math::nd4j_pow<X, int, X>(d1, 3);
auto x39 = static_cast<X>(0.398942) * d1;
auto x05 = static_cast<X>(0.0535161) * nd4j::math::nd4j_pow<X, int, X>(d1, 3);
auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03);
return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03);
}
};
// SwishDerivative: derivative of x * sigmoid(x), written in terms of e^x:
// (e^x (x + e^x + 1)) / (e^x + 1)^2
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
// LogSigmoid: log(sigmoid(x)).
template <typename X>
class LogSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
}
};
// LogSigmoidDerivative: d/dx log(sigmoid(x)) = 1 - sigmoid(x) = 1 / (e^x + 1).
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// fix: cast M_E (a double macro) to X explicitly, as every sibling op does —
// avoids an implicit double -> X conversion at the call site
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
// Sigmoid: 1 / (1 + e^-x).
template <typename X>
class Sigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoid<X, X>(d1);
}
};
// SigmoidDerivative: delegates to the library's sigmoid-derivative helper.
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
// HardSigmoid: piecewise-linear sigmoid — clamp(0.2 * x + 0.5, 0, 1).
template <typename X>
class HardSigmoid {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
// HardSigmoidDerivative: 0.2 inside the linear region (-2.5, 2.5), 0 outside.
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max:
* params[0] = min, params[1] = max. Values already inside [min, max]
* pass through unchanged; otherwise the value is rescaled into the range.
*/
template <typename X>
class SetRange {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
return d1;
// special case for the unit range: squash through a sigmoid first
if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
// NOTE(review): floor of a sigmoid output in (0, 1) scaled by (max - min) = 1
// is always 0 here — looks suspicious; confirm the intended semantics
return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
}
};
// Sin: sine.
template <typename X>
class Sin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sin<X,X>(d1);
}
};
// Square: x^2.
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
// Sqrt: square root, cast to output type Z.
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
// RSqrt: reciprocal square root, 1 / sqrt(x).
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
// Rint: round to nearest integral value (banker's/even rounding per the
// underlying rint semantics).
template <typename X>
class Rint {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_rint<X,X>(d1);
}
};
// SoftPlus: log(1 + e^x), via the library helper.
template <typename X>
class SoftPlus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::softplus<X, X>(d1);
}
};
// Sign: -1, 0 or +1 depending on the sign of x.
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
// TimesOneMinus: x * (1 - x) — e.g. the sigmoid derivative expressed in
// terms of the sigmoid output.
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
// RationalTanh: rational-function approximation of 1.7159 * tanh(2x/3)
// using the constants 1.7159 and 1.41645.
template <typename X>
class RationalTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;
auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
return static_cast<X>(1.7159f) * tanh;
}
};
// RationalTanhDerivative: derivative of the rational tanh approximation above.
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
// a = denominator of the rational approximation
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
// chain rule: outer scale 1.7159 times inner derivative 2/3
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
// Tanh: hyperbolic tangent.
template <typename X>
class Tanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanh<X, X>(d1);
}
};
// RectifiedTanh: max(0, tanh(x)).
template <typename X>
class RectifiedTanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
}
};
// RectifiedTanhDerivative: tanh'(x) for x > 0, else 0.
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
// ATanh: inverse hyperbolic tangent.
template <typename X>
class ATanh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atanh<X,X>(d1);
}
};
// TanhDerivative: delegates to the library's tanh-derivative helper.
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
// Cube: x^3.
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
// CubeDerivative: d/dx x^3 = 3 x^2.
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
// ACos: arc cosine.
template <typename X>
class ACos {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acos<X, X>(d1);
}
};
// ASinh: inverse hyperbolic sine.
template <typename X>
class ASinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asinh<X, X>(d1);
}
};
// ASinhDerivative: d/dx asinh(x) = 1 / sqrt(x^2 + 1).
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
// ACosh: inverse hyperbolic cosine.
template <typename X>
class ACosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_acosh<X, X>(d1);
}
};
// ACoshDerivative: d/dx acosh(x) = 1 / (sqrt(x - 1) * sqrt(x + 1)).
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
// Ones: constant 1 regardless of input.
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
// SoftSign: x / (1 + |x|), via the library helper.
template <typename X>
class SoftSign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsign<X, X>(d1);
}
};
// SoftSignDerivative: delegates to the library's softsign-derivative helper.
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
// MatchConditionBool: boolean condition matcher.
// extraParams[0] = comparison value, extraParams[1] = epsilon tolerance,
// extraParams[2] = condition mode (see the switch cases below).
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
case 2: // less_than
return d1 < compare ? true : false;
case 3: // greater_than
return d1 > compare ? true : false;
case 4: // less_or_equals_than
return d1 <= compare ? true : false;
case 5: // greater_or_equals_than
return d1 >= compare ? true : false;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? true : false;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? true : false;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? true : false;
case 10: // exact equals (no epsilon)
return (d1 == compare) ? true : false;
case 11: // exact not-equals (no epsilon)
return (d1 != compare) ? true : false;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
// unknown mode: fall through and return the input value converted to Z
return d1;
}
};
// MatchCondition: counting variant of MatchConditionBool — as a reduction it
// sums the per-element 0/1 flags, yielding the number of matching elements.
// extraParams[0] = comparison value, extraParams[1] = epsilon,
// extraParams[2] = condition mode (same mode table as MatchConditionBool).
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op return 1.0 if condition met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10: // exact equals (no epsilon)
return (d1 == compare) ? 1 : 0;
case 11: // exact not-equals (no epsilon)
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
// unknown mode: fall through and return the input value converted to Z
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// ELU: exponential linear unit; d2 is the alpha coefficient.
template <typename X, typename Y, typename Z>
class ELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_elu<X,Z>(d1, static_cast<X>(d2));
}
};
// ELUDerivative: derivative of ELU; d2 is the alpha coefficient.
template <typename X, typename Y, typename Z>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_eluderivative<X,Z>(d1, static_cast<X>(d2));
}
};
// RELU: rectified linear unit with configurable cutoff d2 — max(d1, d2)
// computed in the output type Z.
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
// SXELogitsSmoother: label smoothing for sigmoid cross-entropy —
// d1 * (1 - d2) + 0.5 * d2, where d2 is the smoothing factor.
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
}
};
// RELU6: RELU with an upper cap of 6 — min(max(d1, d2), 6).
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
// LeakyRELU: x for x >= 0, alpha * x otherwise; d2 is the alpha slope.
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto val = static_cast<Z>(d1);
auto alpha = static_cast<Z>(d2);
return val < 0.0f ? alpha * val : val;
}
};
// SELU: scaled ELU — lambda * x for x > 0, lambda * alpha * (e^x - 1) otherwise.
// SELU_LAMBDA / SELU_ALPHA are the fixed self-normalizing constants
// (defined elsewhere in the project).
template <typename X>
class SELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
}
};
// SELUDerivative: lambda for x > 0, lambda * alpha * e^x otherwise.
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
// LeakyRELUDerivative: 1 for x >= 0, alpha (d2) otherwise.
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
// ASin: arc sine.
template <typename X>
class ASin {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_asin<X,X>(d1);
}
};
// Sinh: hyperbolic sine.
template <typename X>
class Sinh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sinh<X,X>(d1);
}
};
// SinhDerivative: d/dx sinh(x) = cosh(x).
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
// Cosh: hyperbolic cosine.
template <typename X>
class Cosh {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X,X>(d1);
}
};
// Tan: tangent.
template <typename X>
class Tan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tan<X,X>(d1);
}
};
// TanDerivative: d/dx tan(x) = 1 / cos(x)^2.
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
// ATan: arc tangent.
template <typename X>
class ATan {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_atan<X, X>(d1);
}
};
// Atan2: two-argument arc tangent.
// NOTE(review): the operands are passed to nd4j_atan2 in reversed order
// (d2, d1), so d2 plays the role of y and d1 of x — presumably matching the
// broadcast-op operand convention; confirm against callers before changing.
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
// Identity: pass the input through unchanged.
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
// Stabilize: clamp d1 * k into [MIN_CUTFOFF, -MIN_CUTFOFF] and return the
// corresponding (divided-by-k) input value; k comes from params[0].
// NOTE(review): this assumes MIN_CUTFOFF (defined elsewhere) is negative, so
// -MIN_CUTFOFF is the positive bound — confirm against its definition.
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
// Step: Heaviside step with threshold d2 — 1 when d1 > d2, else 0.
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
// OneMinus: 1 - x.
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
// Sum: reduction summing all elements; identity element is 0.
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
// per-element transform: identity
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// ReduceSameBenchmarkOp: synthetic, compute-heavy reduction used only for
// benchmarking; the per-element math is deliberately expensive and carries
// no domain meaning. All inner math is done in float regardless of X.
template <typename X>
class ReduceSameBenchmarkOp {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// ShannonEntropy: reduction computing -sum(p * log(p)) where p = x^2.
// NOTE(review): the input is squared before the p*log(p) term — presumably the
// inputs are amplitudes rather than probabilities; confirm against callers.
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto p = d1 * d1;
return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
}
// negate: entropy is the negative of the accumulated sum
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
// LogEntropy: reduction computing log(-sum(p * log(p))), i.e. the log of the
// Shannon entropy of the (assumed) probability inputs.
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<Z, Z>(-reduction);
}
};
// Entropy: reduction computing -sum(p * log(p)) over the (assumed)
// probability inputs.
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
// ASum: reduction summing absolute values (L1-style accumulation).
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
// CountNonZero: reduction counting the elements that are not zero.
// NOTE(review): reduceType is ASUM while CountZero uses SUM — presumably
// intentional since the flags are non-negative; confirm.
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::ASUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
// per-element flag: 1 for nonzero, 0 for zero
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// CountZero: reduction counting the elements equal to zero.
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
// per-element flag: 1 for zero, 0 for nonzero
op_def static Z op(X d1, X *extraParams) {
// fix: cast the flag to the declared return type Z (was static_cast<X>),
// consistent with the sibling CountNonZero op
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
// Prod: reduction multiplying all elements; identity element is 1.
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
// per-element transform: identity
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
// Any: reduction returning 1 when any element is nonzero (sums the elements,
// then tests the sum against zero in postProcess).
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
}
};
// All: reduction returning 1 when all elements are nonzero (multiplies the
// elements, then tests the product against zero in postProcess).
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
// Mean: reduction computing the arithmetic mean (sum / n).
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
// divide the accumulated sum by the element count
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return reduction / (Z) n;
}
};
// ReduceFloatBenchmarkOp: synthetic, compute-heavy float reduction used only
// for benchmarking; the per-element math carries no domain meaning.
// All inner math is done in float regardless of X; postProcess averages.
template <typename X, typename Z>
class ReduceFloatBenchmarkOp {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
auto f1 = static_cast<float>(d1);
return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
+ nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
/ nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
* nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
- nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return (Z) reduction / (Z) n;
}
};
// AMean: reduction computing the mean of absolute values (sum |x| / n).
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
const static functions::ReduceType reduceType = functions::ReduceType::SUM;
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
// fix: the operands here are already of type Z, so take nd4j_abs<Z>
// (was nd4j_abs<X>, inconsistent with postProcess below)
return nd4j::math::nd4j_abs<Z>(opOutput) + nd4j::math::nd4j_abs<Z>(old);
}
op_def static Z update(Z old, Z opOutput, Z *extraParams) {
return opOutput + old;
}
// per-element transform: |x|
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
}
};
template <typename X>
class Max {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::MAX;

    // Identity for max: -inf for floating types, lowest value for integrals.
    op_def static X startingValue(const X *input) {
        return -nd4j::DataTypeUtils::infOrMax<X>();
    }

    // Combine partial maxima from parallel workers.
    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(old, opOutput);
    }

    // Fold one per-element result into the running maximum.
    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(opOutput, old);
    }

    // Pairwise form used by broadcast/pairwise kernels.
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    // Reduction form: each element contributes itself.
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    // No final transform needed for max.
    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
    // Parameterized entry point delegates to the stateless form below.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // Returns whichever operand has the larger absolute value, compared
    // after casting both into the output type Z. Ties favor d2.
    op_def static Z op(X d1, Y d2) {
        auto first = static_cast<Z>(d1);
        auto second = static_cast<Z>(d2);
        return nd4j::math::nd4j_abs<Z>(first) > nd4j::math::nd4j_abs<Z>(second) ? first : second;
    }
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
    // Parameterized entry point delegates to the stateless form below.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // Returns whichever operand has the smaller absolute value, compared
    // after casting both into the output type Z. Ties favor d2.
    op_def static Z op(X d1, Y d2) {
        auto first = static_cast<Z>(d1);
        auto second = static_cast<Z>(d2);
        return nd4j::math::nd4j_abs<Z>(first) < nd4j::math::nd4j_abs<Z>(second) ? first : second;
    }
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
    // Element-wise maximum; both operands are cast to the output type Z
    // before comparison.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        auto a = static_cast<Z>(d1);
        auto b = static_cast<Z>(d2);
        return nd4j::math::nd4j_max<Z>(a, b);
    }
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
    // Element-wise minimum; both operands are cast to the output type Z
    // before comparison.
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        auto a = static_cast<Z>(d1);
        auto b = static_cast<Z>(d2);
        return nd4j::math::nd4j_min<Z>(a, b);
    }
};
template <typename X>
class AMax {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::AMAX;

    // Seeds the reduction with the first element rather than 0 —
    // presumably so an actual array element is the identity for the
    // abs-max chain (TODO confirm against the reduce kernels).
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    // Combine partial results: larger absolute value wins.
    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    // Pairwise form: returns max(|d1|, |d2|) (magnitude, sign dropped).
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // Two-arg form: returns the ORIGINAL (signed) operand whose magnitude
    // is larger — note this differs from the params overload above.
    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
    }

    // FIXME: this signature overlaps with MetaOp
    // Reduction form: each element contributes its magnitude.
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};
template <typename X>
class AMin {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::AMIN;

    // Seeds the reduction with the first element rather than +inf —
    // presumably so an actual array element is the identity for the
    // abs-min chain (TODO confirm against the reduce kernels).
    op_def static X startingValue(const X *input) {
        return input[0];
    }

    // Combine partial results: smaller absolute value wins.
    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    // Pairwise forms: both return min(|d1|, |d2|) (magnitude, sign dropped).
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // FIXME: this signature overlaps with MetaOp
    // Reduction form: each element contributes its magnitude.
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};
template <typename X>
class Min {
public:
    no_op_exec_special_accumulation_same
    no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::MIN;

    // Identity for min: +inf for floating types, max value for integrals.
    op_def static X startingValue(const X *input) {
        return nd4j::DataTypeUtils::infOrMax<X>();
    }

    // Combine partial minima from parallel workers.
    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(old, opOutput);
    }

    // Fold one per-element result into the running minimum.
    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(opOutput, old);
    }

    // Pairwise form used by broadcast/pairwise kernels.
    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    // Reduction form: each element contributes itself.
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    // No final transform needed for min.
    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class Norm1 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    // L1 norm: sum of absolute values.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    // Combine partial sums from parallel workers.
    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Fold one per-element result into the running sum.
    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: |d1|, cast into the accumulator type.
    op_def static Z op(X d1, Z *extraParams) {
        auto magnitude = nd4j::math::nd4j_abs<X>(d1);
        return static_cast<Z>(magnitude);
    }

    // The accumulated sum is already the L1 norm.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class Norm2 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    // Euclidean (L2) norm: sqrt of the sum of squares.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    // Combine partial sums from parallel workers.
    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Fold one per-element result into the running sum.
    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: d1^2 (squared in X, then cast to Z).
    op_def static Z op(X d1, Z *extraParams) {
        auto squared = d1 * d1;
        return static_cast<Z>(squared);
    }

    // Final step: square root of the accumulated sum of squares.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
    }
};
template <typename X, typename Z>
class SquaredNorm {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    // Squared L2 norm: sum of squares, without the final sqrt.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    // Combine partial sums from parallel workers.
    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Fold one per-element result into the running sum.
    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: d1^2 (squared in X, then cast to Z).
    op_def static Z op(X d1, Z *extraParams) {
        auto squared = d1 * d1;
        return static_cast<Z>(squared);
    }

    // The accumulated sum of squares is the result itself.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return reduction;
    }
};
template <typename X, typename Z>
class NormFrobenius {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    // Frobenius norm: sqrt of the sum of squared magnitudes.
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    // Combine partial sums from parallel workers.
    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Fold one per-element result into the running sum.
    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: |d1|^2 (computed in X, then cast to Z).
    op_def static Z op(X d1, Z *extraParams) {
        X magnitude = nd4j::math::nd4j_abs<X>(d1);
        return static_cast<Z>(magnitude * magnitude);
    }

    // Final step: square root of the accumulated sum.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
    }
};
template <typename X, typename Z>
class NormP {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    // General p-norm: (sum |x_i|^p)^(1/p), with p supplied in extraParams[0].
    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    // Per-element contribution: |d1|^p. extraParams[0] holds the exponent p.
    op_def static Z op(X d1, Z *extraParams) {
        return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
    }

    // Final step: take the p-th root of the accumulated sum.
    // NOTE(review): divides by extraParams[0] without a zero check —
    // presumably callers guarantee p != 0.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
    }
};
template <typename X, typename Z>
class NormMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Max norm (infinity norm): the largest absolute value.
    // NOTE(review): reduceType is SUM and merge() adds partial results while
    // update() takes an abs-max — this mirrors the surrounding conventions
    // but looks inconsistent; confirm against the reduce executor before
    // changing either.
    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    // Fold one per-element result in: keep the larger absolute value.
    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old),
                nd4j::math::nd4j_abs<Z>(opOutput));
    }

    // Per-element contribution is the element itself; abs is applied in
    // update()/postProcess().
    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1);
    }

    // Fixed: previously computed nd4j_max(abs(reduction), abs(reduction)) —
    // a max of a value with itself, which is just abs(reduction).
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_abs<Z>(reduction);
    }
};
template <typename X, typename Z>
class Variance {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Sample variance: sum of squared deviations from a precomputed mean,
    // divided by (n - 1) (Bessel's correction).
    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: (d1 - mean)^2. The mean is precomputed by
    // the caller and passed in extraParams[0].
    op_def static X op(X d1, Z *extraParams) {
        X mean = static_cast<X>(extraParams[0]);
        X ret = d1 - mean;
        return ret * ret;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        // T bias = extraParams[1];
        // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
        return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
    }
};
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    // Standard deviation: sqrt of the (bias-corrected) sample variance.
    // Accumulation is identical to Variance; only postProcess differs.
    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    // Per-element contribution: (d1 - mean)^2, mean precomputed by the
    // caller in extraParams[0]. Cast made explicit for consistency with
    // Variance::op.
    op_def static Z op(X d1, Z *extraParams) {
        X mean = static_cast<X>(extraParams[0]);
        X ret = d1 - mean;
        return ret * ret;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
        // Fixed: sqrt was instantiated as nd4j_sqrt<X, Z> although 'ret' is
        // already Z; for mixed instantiations (e.g. X = int, Z = double) that
        // truncated the variance before taking the root.
        Z sqrtRet = nd4j::math::nd4j_sqrt<Z, Z>(ret);
        return sqrtRet;
    }
};
template <typename X, typename Y>
class CosineSimilarity {
public:
    // Reduce3 op: cos(x, y) = dot(x, y) / (||x|| * ||y||).
    // The main reduction accumulates the dot product; extraParams[0] and
    // extraParams[1] accumulate the squared norms of x and y respectively.
    static const int extraParamsLen = 2;

    // Extra-params buffer is allocated by the caller, not here.
    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // Final step: divide the dot product by the product of the two norms.
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
    }

    // Per-pair contribution: d1*d2 toward the dot product; side effect of
    // accumulating d1^2 and d2^2 into the norm slots.
    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(d1 * d1);
        extraParams[1] += static_cast<Y>(d2 * d2);
        return static_cast<Y>(d1 * d2);
    }

    // Combine per-worker norm accumulators.
    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    // Device variant: the norm slots are shared across threads, so the
    // side-effect accumulation must be atomic.
    static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
        return static_cast<Y>(d1 * d2);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
template <typename X, typename Y>
class JaccardDistance {
public:
    // Reduce3 op: Jaccard distance = 1 - sum(min(x_i, y_i)) / sum(max(x_i, y_i)).
    // extraParams[0] accumulates the numerator, extraParams[1] the
    // denominator; the main reduction value itself is unused (always 0).
    static const int extraParamsLen = 2;

    // Extra-params buffer is allocated by the caller, not here.
    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    // Fixed: the zero was cast to X although the return type is Y; for mixed
    // instantiations that relied on an extra implicit conversion.
    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // Final step: 1 - numerator / denominator.
    // NOTE(review): no guard against a zero denominator — presumably inputs
    // are non-negative and not all-zero; confirm against callers.
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        // num / denom
        return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
    }

    op_def static Y num(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def static Y denom(X d1, X d2) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    // Per-pair contribution is entirely via the extraParams side effects.
    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(num(d1, d2));
        extraParams[1] += static_cast<Y>(denom(d1, d2));
        return static_cast<Y>(0.0f);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    // Device variant: accumulators are shared across threads, so use atomics.
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
        return static_cast<Y>(0.0f);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
template <typename X, typename Y>
class SimpleHammingDistance {
public:
    // Reduce3 op: fraction of positions where the two arrays disagree.
    static const int extraParamsLen = 0;

    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // Normalize the mismatch count by the number of compared elements.
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        return static_cast<Y>(reduction / n);
    }

    // 1 for a mismatch, 0 for a match.
    op_def static Y op(X d1, X d2, Y *extraParams) {
        if (d1 == d2)
            return static_cast<Y>(0.0f);
        return static_cast<Y>(1.0f);
    }

    // No extra params to combine.
    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
    }

#ifdef __CUDACC__
    // No shared state, so the device variant simply reuses op().
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParams) {
        return op(d1, d2, extraParams);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
template <typename X, typename Y>
class CosineDistance {
public:
    // Reduce3 op: cosine distance = 1 - dot(x, y) / (||x|| * ||y||).
    // extraParams[0]/[1] accumulate the squared norms of x and y.
    static const int extraParamsLen = 2;

    // Extra-params buffer is allocated by the caller, not here.
    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
    }

    // Per-pair contribution: d1*d2 toward the dot product; side effect of
    // accumulating |d1|^2 and |d2|^2 into the norm slots.
    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
        extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
        return (d1 * d2);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    // Device variant: norm slots are shared across threads, so use atomics.
    // Fixed: nd4j_abs was instantiated as nd4j_abs<Y> on X-typed operands,
    // diverging from the host path above; compute in X and cast once.
    static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)));
        return (d1 * d2);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
    // Reduce3 op: plain dot product, sum of element-wise products.
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
        //delete[] * extraParamsRef;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // The accumulated sum is the dot product itself.
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
        return reduction;
    }

    // Per-pair contribution: d1 * d2.
    op_def static Y op(X d1, X d2, Y *extraParamsRef) {
        return static_cast<Y>(d1 * d2);
    }

#ifdef __CUDACC__
    // No shared state, so the device variant simply reuses op().
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]);
return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps));
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
template <typename X, typename Y>
class EuclideanDistance {
public:
    // Reduce3 op: L2 distance, sqrt of the sum of squared differences.
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // Final step: square root of the accumulated squared differences.
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
        return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
    }

    // Per-pair contribution: (d1 - d2)^2, computed in X then cast to Y.
    op_def static Y op(X d1, X d2, Y *extraParamsRef) {
        X diff = d1 - d2;
        return static_cast<Y>(diff * diff);
    }

#ifdef __CUDACC__
    // No shared state, so the device variant simply reuses op().
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
    // Reduce3 op: L1 distance, sum of absolute differences.
    static const int extraParamsLen = 0;

    op_def static X * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParamsRef) {
        //no-op
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
        return reduction;
    }

    // Per-pair contribution: |d1 - d2|.
    op_def static Y op(X d1, X d2, Y *extraParamsRef) {
        return nd4j::math::nd4j_abs<X>(d1 - d2);
    }

    op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
        return old + opOutput;
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
    }

#ifdef __CUDACC__
    // No shared state, so the device variant simply reuses op().
    __device__
    static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
    // Fixed: merge was typed (X, X, X*) although every partial result and
    // the extra-params buffer are Y — a lossy Y -> X round-trip for mixed
    // instantiations, inconsistent with update() and the sibling reduce3 ops.
    op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }
};
template <typename X, typename Z>
class IndexAbsoluteMax {
public:
    // Index-reduce op: position of the element with the largest |value|.
    // NOTE(review): passes an IndexValue<X> straight into nd4j_abs<X> —
    // looks like a type mismatch; presumably this overload is never
    // instantiated. Confirm before relying on it.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(val);
    }

    // Keeps the candidate with the larger |value|; note both inputs are
    // rewritten in place to their absolute values.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<X>(old.value);
        if (opOutput.value > old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

    // NOTE(review): returns f2 when f1 has the LARGER magnitude — this looks
    // inverted, but the same pattern appears in every index-reduce merge in
    // this file; confirm intended merge semantics before changing.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }

    // |value| is never below 0, so 0 is the identity for the abs-max chain.
    static _CUDA_HD inline X startingValue(const X *input) {
        return 0;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class FirstIndex {
public:
    // Index-reduce op: smallest index whose value satisfies the
    // MatchCondition encoded in extraParams. Index -1 means "not found".
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }

    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
        if (opOutput.index < 0)
            return old;
#endif

        // Does the candidate value satisfy the condition at all?
        auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);

        //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);

        if (res == static_cast<X>(0))
            return old;

        // First match ever seen, or an earlier match than the current one.
        if (old.index < 0)
            return opOutput;

        if (old.index > opOutput.index)
            return opOutput;

        return old;
    }

    static _CUDA_HD inline X startingValue(const X *input) {
        return -nd4j::DataTypeUtils::infOrMax<X>();
    }

    // Sentinel index -1 marks "no match found yet".
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }

    // NOTE(review): returns f2 when f1.index is LARGER — mirrors the
    // (inverted-looking) merge convention used by all index ops here.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.index > f2.index)
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
};
template <typename X, typename Z>
class LastIndex {
public:
    // Index-reduce op: largest index whose value satisfies the
    // MatchCondition encoded in extraParams. Index -1 means "not found".
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }

    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
        if (opOutput.index < 0)
            return old;
#endif

        // Does the candidate value satisfy the condition at all?
        auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);

        if (res == static_cast<X>(0))
            return old;

        // First match ever seen, or a later match than the current one.
        if (old.index < 0)
            return opOutput;

        if (old.index < opOutput.index)
            return opOutput;

        return old;
    }

    static _CUDA_HD inline X startingValue(const X *input) {
        return -nd4j::DataTypeUtils::infOrMax<X>();
    }

    // Sentinel index -1 marks "no match found yet".
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }

    // NOTE(review): returns f2 when f1.index is SMALLER — mirrors the
    // (inverted-looking) merge convention used by all index ops here.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.index < f2.index)
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }
};
template <typename X, typename Z>
class IndexMax {
public:
    // Index-reduce op: position of the largest element (argmax).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }

    // Keeps the candidate with the larger value; on CUDA, ties break toward
    // the lower index to make the result deterministic across thread order.
    static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        if (opOutput.value > old.value) {
            return opOutput;
        }
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

    // NOTE(review): returns f2 when f1.value is LARGER — mirrors the
    // (inverted-looking) merge convention used by all index ops here.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.value > f2.value)
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }

    // Identity for max: -inf / lowest representable value.
    static _CUDA_HD inline X startingValue(const X *input) {
        return -nd4j::DataTypeUtils::infOrMax<X>();
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class IndexAbsoluteMin {
public:
    // Index-reduce op: position of the element with the smallest |value|.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
            functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }

    // Identity for abs-min: +inf / largest representable value.
    static _CUDA_HD inline X startingValue(const X *input) {
        return nd4j::DataTypeUtils::infOrMax<X>();
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

    // Keeps the candidate with the smaller |value|; note both inputs are
    // rewritten in place to their absolute values. CUDA tie-break prefers
    // the lower index for determinism.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<X>(old.value);
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

    // NOTE(review): returns f2 when f1 has the SMALLER magnitude — mirrors
    // the (inverted-looking) merge convention used by all index ops here.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class IndexMin {
public:
    // Index-reduce op: position of the smallest element (argmin).
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
            functions::indexreduce::IndexValue<X> val, X *extraParams) {
        return val;
    }

    // Identity for min: +inf / largest representable value.
    static _CUDA_HD inline X startingValue(const X *input) {
        return nd4j::DataTypeUtils::infOrMax<X>();
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
        functions::indexreduce::IndexValue<X> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

    // Keeps the candidate with the smaller value; on CUDA, ties break toward
    // the lower index to make the result deterministic across thread order.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

    // NOTE(review): returns f2 when f1.value is SMALLER — mirrors the
    // (inverted-looking) merge convention used by all index ops here.
    static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
            functions::indexreduce::IndexValue<X> f1,
            functions::indexreduce::IndexValue<X> f2, X *extraParams) {
        if (f1.value < f2.value)
            return f2;
        return f1;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
            functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
            X *dx, int incx, X *extraParams, X *result) {
        return reduction;
    }

    static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
            functions::indexreduce::IndexValue<X> d2, X *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
    // Extracts the variance from an accumulated SummaryStatsData record.
    // When biasCorrected is requested but the corrected estimate comes out
    // negative (numerically possible for tiny samples), falls back to the
    // uncorrected variance.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (biasCorrected) {
            Z ret = static_cast<Z>(val.varianceBiasCorrected());
            if (ret < static_cast<Z>(0.0f))
                return static_cast<Z>(val.variance());
            return ret;
        }
        return static_cast<Z>(val.variance());
    }

    // Per-element pass-through; accumulation happens inside SummaryStatsData.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
    // Extracts the standard deviation (sqrt of variance) from an accumulated
    // SummaryStatsData record. Mirrors SummaryStatsVariance: if the
    // bias-corrected variance is negative, falls back to the uncorrected one
    // before taking the root.
    static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
        if (biasCorrected) {
            auto ret = static_cast<Z>(val.varianceBiasCorrected());
            if (ret < static_cast<Z>(0.0f))
                return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
            else
                return nd4j::math::nd4j_sqrt<double, Z>(ret);
        }
        return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
    }

    // Per-element pass-through; accumulation happens inside SummaryStatsData.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
        return d1;
    }
};
template <typename X>
class DropOut {
public:
    no_op_exec_special_same
    no_op_exec_special_same_cuda

    // Dropout: zeroes d1 with probability ~prob (params[0]), otherwise passes
    // it through unchanged.
    inline _CUDA_D static X op(X d1, X *params) {
        X prob = params[0];

#ifdef __CUDACC__
        X length = params[1];
        X tid = blockIdx.x * blockDim.x + threadIdx.x;
        // Cheap per-thread pseudo-random value in [0, 1] derived from the
        // device clock — not a high-quality RNG, but sufficient for a mask.
        X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
        // Fixed: rand() / RAND_MAX was INTEGER division, which is 0 for every
        // draw except rand() == RAND_MAX — so the mask (almost) never fired.
        // Convert before dividing to get a uniform value in [0, 1].
        // NOTE(review): rand() is not thread-safe/seedable per-op; confirm
        // callers accept that.
        X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
        return rnd >= prob ? static_cast<X>(0.0f) : d1;
    }
};
template <typename X, typename Y, typename Z>
class DropOutInverted {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

#ifdef __CUDACC__
    __device__
#endif
        // Inverted dropout: returns 0 when the pseudo-random draw rnd >= prob
        // (taken from d2), otherwise returns d1 scaled by 1/prob so the
        // expected magnitude of the output is preserved.
        inline static Z op(X d1, Y d2, Z *params) {
            Y prob = d2;
#ifdef __CUDACC__
            X length = params[1];
            X tid = blockIdx.x * blockDim.x + threadIdx.x;
            // cheap per-thread pseudo-random value in [0, 1] derived from the clock
            X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
            // BUG FIX: `rand() / RAND_MAX` was integer division (almost always
            // 0); divide in floating point so rnd is uniform over [0, 1].
            X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX);
#endif
            // BUG FIX: the kept branch used reinterpret_cast<Z>(...), which is
            // ill-formed between arithmetic types; a value conversion
            // (static_cast) is what is intended here.
            return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob));
        }
};
template <typename X, typename Y, typename Z>
class ReplaceNans {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        // Substitutes d2 for d1 whenever d1 is NaN; otherwise passes d1 through.
        op_def static Z op(X d1, Y d2, Z *params) {
            if (nd4j::math::nd4j_isnan(d1))
                return static_cast<Z>(d2);
            return static_cast<Z>(d1);
        }
};
// this op is used for conditional pairwise transforms only:
// d1 is replaced by d2 when the selected comparison of d1 against
// params[0] holds, otherwise d1 is kept.
template <typename X, typename Y, typename Z>
class CompareAndReplace{
    public:
        // op definition for PairWise Transform
        // params layout: [0] comparison value, [2] epsilon, [3] mode selector
        op_def static Z op(X d1, Y d2, Z *params) {
            auto first = static_cast<Z>(d1);
            auto second = static_cast<Z>(d2);
            auto compare = params[0];
            auto eps = params[2];
            auto mode = static_cast<int>(params[3]);

            switch (mode) {
                case 0:  // equals (within eps)
                    return nd4j::math::nd4j_abs<Z>(first - compare) <= eps ? second : first;
                case 1:  // not equals (within eps)
                    return nd4j::math::nd4j_abs<Z>(first - compare) > eps ? second : first;
                case 2:  // less_than
                    return first < compare ? second : first;
                case 3:  // greater_than
                    return first > compare ? second : first;
                case 4:  // less_or_equals_than
                    return first <= compare ? second : first;
                case 5:  // greater_or_equals_than
                    return first >= compare ? second : first;
                case 6:  // abs_less_than
                    return nd4j::math::nd4j_abs<Z>(first) < compare ? second : first;
                case 7:  // abs_greater_than
                    return nd4j::math::nd4j_abs<Z>(first) > compare ? second : first;
                case 8:  // is inf
                    return nd4j::math::nd4j_isinf(first) ? second : first;
                case 9:  // is nan
                    return nd4j::math::nd4j_isnan(first) ? second : first;
                case 10: // exact equals
                    return first == compare ? second : first;
                case 11: // exact not equals
                    return first != compare ? second : first;
                case 12: // abs_greater_or_equals_than
                    return nd4j::math::nd4j_abs<Z>(first) >= compare ? second : first;
                case 13: // abs_less_or_equals_than
                    return nd4j::math::nd4j_abs<Z>(first) <= compare ? second : first;
                default: // unknown mode: report it and keep the original value
                    printf("Undefined boolean operation: [%i]\n", mode);
                    return first;
            }
        }
};
// Pairwise "compare and set": the candidate value d2 replaces the current
// value d1 when the selected comparison (usually on d2) holds.
template <typename X, typename Y, typename Z>
class CompareAndSet {
    public:
        // op definition for PairWise Transform
        // params layout: [0] comparison value, [2] epsilon, [3] mode selector
        op_def static Z op(X dX, Y dY, Z *params) {
            auto current = static_cast<Z>(dX);
            auto candidate = static_cast<Z>(dY);
            auto compare = params[0];
            auto eps = params[2];
            auto mode = static_cast<int>(params[3]);

            switch (mode) {
                case 0:  // equals (within eps)
                    return nd4j::math::nd4j_abs<Z>(candidate - compare) <= eps ? candidate : current;
                case 1:  // not equals (within eps)
                    return nd4j::math::nd4j_abs<Z>(candidate - compare) > eps ? candidate : current;
                case 2:  // less_than
                    return candidate < compare ? candidate : current;
                case 3:  // greater_than
                    return candidate > compare ? candidate : current;
                case 4:  // less_or_equals_than
                    return candidate <= compare ? candidate : current;
                case 5:  // greater_or_equals_than
                    return candidate >= compare ? candidate : current;
                case 6:  // abs_less_than
                    return nd4j::math::nd4j_abs<Z>(candidate) < compare ? candidate : current;
                case 7:  // abs_greater_than
                    return nd4j::math::nd4j_abs<Z>(candidate) > compare ? candidate : current;
                case 8:  // is inf
                    return nd4j::math::nd4j_isinf(candidate) ? candidate : current;
                case 9:  // is nan
                    return nd4j::math::nd4j_isnan(candidate) ? candidate : current;
                case 10: // exact equals
                    return candidate == compare ? candidate : current;
                case 11: // exact not equals
                    return candidate != compare ? candidate : current;
                // NOTE(review): unlike every other mode, 12/13 test the FIRST
                // operand (current) rather than the candidate. Kept as-is to
                // preserve behavior, but this asymmetry looks like a
                // copy/paste slip — confirm against upstream libnd4j.
                case 12: // abs_greater_or_equals_than
                    return nd4j::math::nd4j_abs<Z>(current) >= compare ? candidate : current;
                case 13: // abs_less_or_equals_than
                    return nd4j::math::nd4j_abs<Z>(current) <= compare ? candidate : current;
                default: // unknown mode: report it and keep the current value
                    printf("Undefined boolean operation: [%i]\n", mode);
                    return current;
            }
        }
};
// Unary "compare and set": d1 is replaced by params[1] (the set value) when
// the selected comparison of d1 against params[0] holds.
template <typename X>
class CompareAndSetTransform {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        // op definition for Transform
        // params layout: [0] comparison value, [1] replacement value,
        //                [2] epsilon, [3] mode selector
        op_def static X op(X d1, X *params) {
            auto compare = params[0];
            auto set = params[1];
            auto eps = params[2];
            auto mode = static_cast<int>(params[3]);

            switch (mode) {
                case 0:  // equals (within eps)
                    return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? set : d1;
                case 1:  // not equals (within eps)
                    return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? set : d1;
                case 2:  // less_than
                    return d1 < compare ? set : d1;
                case 3:  // greater_than
                    return d1 > compare ? set : d1;
                case 4:  // less_or_equals_than
                    return d1 <= compare ? set : d1;
                case 5:  // greater_or_equals_than
                    return d1 >= compare ? set : d1;
                case 6:  // abs_less_than
                    return nd4j::math::nd4j_abs<X>(d1) < compare ? set : d1;
                case 7:  // abs_greater_than
                    return nd4j::math::nd4j_abs<X>(d1) > compare ? set : d1;
                case 8:  // is inf
                    return nd4j::math::nd4j_isinf(d1) ? set : d1;
                case 9:  // is nan
                    return nd4j::math::nd4j_isnan(d1) ? set : d1;
                case 10: // exact equals
                    return d1 == compare ? set : d1;
                case 11: // exact not equals
                    return d1 != compare ? set : d1;
                case 12: // abs_greater_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) >= compare ? set : d1;
                case 13: // abs_less_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) <= compare ? set : d1;
                default: // unknown mode: report it and keep the original value
                    printf("Undefined boolean operation: [%i]\n", mode);
                    return d1;
            }
        }
};
}
#endif
|
SampleAlongProbes.h | /*
* SampleAlongProbes.h
* Copyright (C) 2009-2017 by MegaMol Team
* Alle Rechte vorbehalten.
*/
#ifndef SAMPLE_ALONG_PROBES_H_INCLUDED
#define SAMPLE_ALONG_PROBES_H_INCLUDED
#include "mmcore/CalleeSlot.h"
#include "mmcore/CallerSlot.h"
#include "mmcore/Module.h"
#include "ProbeCollection.h"
#include "mmcore/param/ParamSlot.h"
#include "kdtree.h"
#include "mmcore/param/IntParam.h"
#include "adios_plugin/CallADIOSData.h"
namespace megamol {
namespace probe {
// Module that samples a scalar or vector field (delivered via ADIOS) along a
// set of probes, using a kd-tree for spatial lookups.
// NOTE(review): the class name "SampleAlongPobes" looks like a typo for
// "SampleAlongProbes" (the registered ClassName string is spelled correctly);
// renaming would break external references, so it is left as-is.
class SampleAlongPobes : public core::Module {
public:
/**
* Answer the name of this module.
*
* @return The name of this module.
*/
static const char* ClassName() { return "SampleAlongProbes"; }
/**
* Answer a human readable description of this module.
*
* @return A human readable description of this module.
*/
static const char* Description() { return "..."; }
/**
* Answers whether this module is available on the current system.
*
* @return 'true' if the module is available, 'false' otherwise.
*/
static bool IsAvailable(void) { return true; }
SampleAlongPobes();
virtual ~SampleAlongPobes();
protected:
virtual bool create();
virtual void release();
// version counter bumped whenever the sampled data changes
uint32_t _version;
// outgoing (lhs) probe data and incoming (rhs) probe source
core::CalleeSlot _probe_lhs_slot;
core::CallerSlot _probe_rhs_slot;
size_t _probe_cached_hash;
// incoming ADIOS data plus cached hash for change detection
core::CallerSlot _adios_rhs_slot;
size_t _adios_cached_hash;
// incoming kd-tree over the full dataset plus cached hash
core::CallerSlot _full_tree_rhs_slot;
size_t _full_tree_cached_hash;
// which ADIOS variable(s) to sample and how
core::param::ParamSlot _parameter_to_sample_slot;
core::param::ParamSlot _num_samples_per_probe_slot;
core::param::ParamSlot _sample_radius_factor_slot;
core::param::ParamSlot _sampling_mode;
// per-component variable names for vector sampling
core::param::ParamSlot _vec_param_to_samplex_x;
core::param::ParamSlot _vec_param_to_samplex_y;
core::param::ParamSlot _vec_param_to_samplex_z;
core::param::ParamSlot _vec_param_to_samplex_w;
private:
//TODO rename to "doScalarSampling" ?
template <typename T>
void doSampling(const std::shared_ptr<pcl::KdTreeFLANN<pcl::PointXYZ>>& tree, std::vector<T>& data);
// NOTE(review): "Samling" appears to be a typo for "Sampling"; kept for ABI/API stability.
template <typename T>
void doVectorSamling(const std::shared_ptr<pcl::KdTreeFLANN<pcl::PointXYZ>>& tree, const std::vector<T>& data_x,
const std::vector<T>& data_y, const std::vector<T>& data_z, const std::vector<T>& data_w);
bool getData(core::Call& call);
bool getMetaData(core::Call& call);
// the probe collection this module fills with sampling results
std::shared_ptr<ProbeCollection> _probes;
size_t _old_datahash;
bool _trigger_recalc;
bool paramChanged(core::param::ParamSlot& p);
};
// Samples the scalar field `data` along every probe: walks sample points
// along each probe's direction, averages the field values of all tree
// neighbors within a radius, and stores per-probe min/max/average.
template <typename T>
void SampleAlongPobes::doSampling(const std::shared_ptr<pcl::KdTreeFLANN<pcl::PointXYZ>>& tree, std::vector<T>& data) {

    const int samples_per_probe = this->_num_samples_per_probe_slot.Param<core::param::IntParam>()->Value();
    const float sample_radius_factor = this->_sample_radius_factor_slot.Param<core::param::FloatParam>()->Value();

    //#pragma omp parallel for
    for (int32_t i = 0; i < static_cast<int32_t>(_probes->getProbeCount()); i++) {

        FloatProbe probe;

        // Convert whatever probe variant is stored at index i into a FloatProbe.
        // BUG FIX: the local alias was named "T", redeclaring (shadowing) the
        // enclosing function template's parameter T, which is ill-formed and
        // rejected by GCC; renamed to ProbeT.
        auto visitor = [&probe, i, this](auto&& arg) {
            using ProbeT = std::decay_t<decltype(arg)>;
            if constexpr (std::is_same_v<ProbeT, probe::BaseProbe> || std::is_same_v<ProbeT, probe::Vec4Probe>) {
                // re-type the probe as FloatProbe, copying the shared metadata
                probe.m_timestamp = arg.m_timestamp;
                probe.m_value_name = arg.m_value_name;
                probe.m_position = arg.m_position;
                probe.m_direction = arg.m_direction;
                probe.m_begin = arg.m_begin;
                probe.m_end = arg.m_end;
                _probes->setProbe(i, probe);
            } else if constexpr (std::is_same_v<ProbeT, probe::FloatProbe>) {
                probe = arg;
            } else {
                // unknown/incompatible probe type, throw error? do nothing?
            }
        };

        auto generic_probe = _probes->getGenericProbe(i);
        std::visit(visitor, generic_probe);

        std::shared_ptr<FloatProbe::SamplingResult> samples = probe.getSamplingResult();

        auto sample_step = probe.m_end / static_cast<float>(samples_per_probe);
        auto radius = sample_step * sample_radius_factor;

        float min_value = std::numeric_limits<float>::max();
        float max_value = -std::numeric_limits<float>::max();
        float avg_value = 0.0f;
        samples->samples.resize(samples_per_probe);

        for (int j = 0; j < samples_per_probe; j++) {
            pcl::PointXYZ sample_point;
            sample_point.x = probe.m_position[0] + j * sample_step * probe.m_direction[0];
            sample_point.y = probe.m_position[1] + j * sample_step * probe.m_direction[1];
            sample_point.z = probe.m_position[2] + j * sample_step * probe.m_direction[2];

            std::vector<uint32_t> k_indices;
            std::vector<float> k_distances;

            auto num_neighbors = tree->radiusSearch(sample_point, radius, k_indices, k_distances);
            if (num_neighbors == 0) {
                // no point within the radius: fall back to the single nearest point
                num_neighbors = tree->nearestKSearch(sample_point, 1, k_indices, k_distances);
            }

            // accumulate values
            float value = 0;
            for (int n = 0; n < num_neighbors; n++) {
                value += data[k_indices[n]];
            } // end num_neighbors
            // assumes the tree is non-empty so num_neighbors >= 1 here — TODO confirm
            value /= num_neighbors;

            samples->samples[j] = value;
            min_value = std::min(min_value, value);
            max_value = std::max(max_value, value);
            avg_value += value;
        } // end num samples per probe
        avg_value /= samples_per_probe;
        samples->average_value = avg_value;
        samples->max_value = max_value;
        samples->min_value = min_value;
    } // end for probes
}
// Samples a 4-component vector field (x/y/z/w supplied as separate arrays)
// along every probe, averaging each component over the tree neighbors found
// within the sample radius.
template <typename T>
inline void SampleAlongPobes::doVectorSamling(
    const std::shared_ptr<pcl::KdTreeFLANN<pcl::PointXYZ>>& tree,
    const std::vector<T>& data_x,
    const std::vector<T>& data_y,
    const std::vector<T>& data_z,
    const std::vector<T>& data_w) {

    const int samples_per_probe = this->_num_samples_per_probe_slot.Param<core::param::IntParam>()->Value();
    const float sample_radius_factor = this->_sample_radius_factor_slot.Param<core::param::FloatParam>()->Value();

    //#pragma omp parallel for
    for (int32_t i = 0; i < static_cast<int32_t>(_probes->getProbeCount()); i++) {

        Vec4Probe probe;

        // Convert whatever probe variant is stored at index i into a Vec4Probe.
        // BUG FIX: the local alias was named "T", redeclaring (shadowing) the
        // enclosing function template's parameter T, which is ill-formed and
        // rejected by GCC; renamed to ProbeT.
        auto visitor = [&probe, i, this](auto&& arg) {
            using ProbeT = std::decay_t<decltype(arg)>;
            if constexpr (std::is_same_v<ProbeT, probe::BaseProbe> || std::is_same_v<ProbeT, probe::FloatProbe>) {
                // re-type the probe as Vec4Probe, copying the shared metadata
                probe.m_timestamp = arg.m_timestamp;
                probe.m_value_name = arg.m_value_name;
                probe.m_position = arg.m_position;
                probe.m_direction = arg.m_direction;
                probe.m_begin = arg.m_begin;
                probe.m_end = arg.m_end;
                _probes->setProbe(i, probe);
            } else if constexpr (std::is_same_v<ProbeT, probe::Vec4Probe>) {
                probe = arg;
            } else {
                // unknown/incompatible probe type, throw error? do nothing?
            }
        };

        auto generic_probe = _probes->getGenericProbe(i);
        std::visit(visitor, generic_probe);

        std::shared_ptr<Vec4Probe::SamplingResult> samples = probe.getSamplingResult();

        auto sample_step = probe.m_end / static_cast<float>(samples_per_probe);
        auto radius = sample_step * sample_radius_factor;

        float min_value = std::numeric_limits<float>::max();
        float max_value = -std::numeric_limits<float>::max();
        float avg_value = 0.0f;
        samples->samples.resize(samples_per_probe);

        for (int j = 0; j < samples_per_probe; j++) {
            pcl::PointXYZ sample_point;
            sample_point.x = probe.m_position[0] + j * sample_step * probe.m_direction[0];
            sample_point.y = probe.m_position[1] + j * sample_step * probe.m_direction[1];
            sample_point.z = probe.m_position[2] + j * sample_step * probe.m_direction[2];

            std::vector<uint32_t> k_indices;
            std::vector<float> k_distances;

            auto num_neighbors = tree->radiusSearch(sample_point, radius, k_indices, k_distances);
            if (num_neighbors == 0) {
                // no point within the radius: fall back to the single nearest point
                num_neighbors = tree->nearestKSearch(sample_point, 1, k_indices, k_distances);
            }

            // accumulate values
            float value_x = 0, value_y = 0, value_z = 0, value_w = 0;
            for (int n = 0; n < num_neighbors; n++) {
                value_x += data_x[k_indices[n]];
                value_y += data_y[k_indices[n]];
                value_z += data_z[k_indices[n]];
                value_w += data_w[k_indices[n]];
            } // end num_neighbors
            // assumes the tree is non-empty so num_neighbors >= 1 here — TODO confirm
            samples->samples[j][0] = value_x / num_neighbors;
            samples->samples[j][1] = value_y / num_neighbors;
            samples->samples[j][2] = value_z / num_neighbors;
            samples->samples[j][3] = value_w / num_neighbors;
            //min_value = std::min(min_value, value);
            //max_value = std::max(max_value, value);
            //avg_value += value;
        } // end num samples per probe
        //avg_value /= samples_per_probe;
        //samples->average_value = avg_value;
        //samples->max_value = max_value;
        //samples->min_value = min_value;
    } // end for probes
}
} // namespace probe
} // namespace megamol
#endif // !SAMPLE_ALONG_PROBES_H_INCLUDED
|
SparseGrid3D.h | /* ParSGCT Code source file.
Copyright (c) 2015 Peter Edward Strazdins. All rights reserved.
Licensed under the terms of the BSD License as described in the LICENSE_SGCT file.
This comment must be retained in any redistributions of this source file.
*/
// 3D (x,y,z) sparse grid data structure
// Storage is contiguous in the x co-ordinate
// written by Peter Strazdins, June 15
/* This data structure is for a distributed, (partially) filled sparse grid.
This arises from the truncated SGCT formula. Let g be the grid index
and l be the level, with g >= l. The filled sparse grid will
have a factor of 2^(g-l) elements filled in over the classic sparse grid
(in the places where this is possible).
*/
#ifndef SPARSEGRID3D_INCLUDED
#define SPARSEGRID3D_INCLUDED
// #define NOOPT2D /* remove 2D B=1 optimization on interpolate() */
#include <stdio.h>
#include <assert.h>
#include <cmath> //std::abs, log2
#include <string> //std::string
#ifdef _OPENMP
#include <omp.h>
#endif
#include "Vec3D.h"
#include "ProcGrid3D.h"
// Distributed, (partially) filled sparse grid for the truncated SGCT.
// Either wraps a dense HaloArray3D (useFullGrid) or stores a CSR-like sparse
// layout (u / ny / cs / rx / rs) over the local portion of the grid.
class SparseGrid3D {
  public:
    int level;        // truncated SGCT level
    bool is2d;
    Vec3D<int> g;     // global s.g. index
    ProcGrid3D* pg;   // s.g. process grid vectors
    int B;            // size of individual elements
    bool useFullGrid; // if true use uh, else use other fields
    HaloArray3D *uh;  // full grid (dense) data structure
    bool allocedUh;   // records if uh was allocated by constructor
    // sparse grid data structure
    double *u;        // storage vector
    int nz;           // local s.g. length in z-dimension (=1 for 2D case)
    int *ny;          // local s.g. lengths (per z) in y-dimension
    int *cs;          // col (y) stride vector
    int **rx, **rs;   // row index vector (as in CSR format) and stride per z
  private:
    int sz_u;         // total number of elements in s.g. for this process

    static inline bool isPower2(int v) {
      return (v & (v-1)) == 0;
    }
    // number of grid points along one dimension for grid index g
    static inline int gridSz(int g) {
      return (g < 0)? 0: ((1 << g) + 1);
    }
    inline Vec3D<int> gridSz(Vec3D<int> g) {
      return Vec3D<int>(gridSz(g.x), gridSz(g.y), (is2d == 0) + (1 << g.z));
    }
    // count of trailing zero bits; requires jG > 0
    static inline int numRightZeroes(int jG) {
      assert (jG > 0);
      int nz = 0;
      while ((jG & 1) == 0)
        jG >>= 1, nz++;
      return(nz);
    }

  public:
    // wrap an externally-allocated dense grid
    SparseGrid3D(HaloArray3D *uh_, bool is2dim,
                 Vec3D<int> g_, ProcGrid3D *pg_, int blk=1) {
      allocedUh = false;
      useFullGrid = true; is2d = is2dim; g = g_; pg = pg_; B = blk;
      rx = 0; rs = 0; u = 0; // to trap bad references by clients
      ny = 0; cs = 0;
      uh = uh_;
      sz_u = uh->l.prod();
      nz = uh->l.z;
    } //SparseGrid3D()

    // NOTE(review): default constructor leaves all members uninitialized;
    // callers must not use such an object before assigning over it.
    SparseGrid3D() {}

    // allocate either a dense grid (useFG) or the sparse CSR-like layout
    SparseGrid3D(bool useFG, int lv, bool is2dim,
                 Vec3D<int> g_, ProcGrid3D *pg_, int blk=1){
      allocedUh =false;
      useFullGrid = useFG; level = lv; is2d = is2dim; g = g_; pg = pg_; B = blk;
      rx = 0; rs = 0; u = 0; uh = 0; // to trap bad references by clients
      ny = 0; cs = 0;
      if (useFullGrid) {
        allocedUh = true;
        uh = new HaloArray3D(pg->G2L(gridSz(g)), Vec3D<int>(0), B);
        sz_u = uh->l.prod();
        nz = uh->l.z;
        return;
      }
      sz_u = 0; nz = 0;
      if (pg->myrank < 0) // this process does not hold part of s.g
        return;
      assert (isPower2(pg->P.x) && (is2d || isPower2(pg->P.y)));
      // needed to calc. nx, ny
      int Nz = gridSz(g.z);
      nz = is2d? 1: pg->G2L(2, Nz);
      ny = new int[nz]; cs = new int [nz];
      rx = new int* [nz]; rs = new int* [nz];
      int kG = (g.z==0)? 0: pg->L2G0(2, Nz);
      for (int k=0; k < nz; k++,kG++) {
        int lk = numRightZeroes((2*kG) | (1 << level));
        assert (0 < lk && lk <= level);
        assert (g.z > 0 || lk == level); //check for 2D case
        int Ny = gridSz(g.y - level + lk); //global number of s.g. elements at kG
        // calculate local length (y) for the sparse block distribution
        ny[k] = (Ny >= pg->P.y)? pg->G2L(1, Ny):
                (pg->id.y % (pg->P.y / (Ny-1)) == 0);
        cs[k] = 1 << (level - lk); // =1 for the 2D case
        // this is needed for interopolate() in order to calc. sparse offsets.
        // should be true if isPower2(pg->P.y) is
        assert (Ny < pg->P.y || pg->L2G0(1, gridSz(g.y)) % cs[k] == 0);
        rs[k] = new int[ny[k]];
        rx[k] = new int[ny[k]+1];
        rx[k][0] = k==0? 0: rx[k-1][ny[k-1]];
        int jG = pg->L2G0(1, Ny);
        for (int j=0; j < ny[k]; j++,jG++) {
          int lj = numRightZeroes((2*jG) | (1 << lk));
          assert (0 < lj && lj <= lk);
          rs[k][j] = 1 << (level-lj);
          int Nx = gridSz(g.x - level + lj); // global # s.g. elements at kG,jG
          // calculate local length corresp. Nx for the sparse block distribution
          int nx = (Nx >= pg->P.x)? pg->G2L(0, Nx):
                   (pg->id.x % (pg->P.x / (Nx-1)) == 0);
          // this is needed for interopolate() in order to calc. sparse offsets
          // should be true if isPower2(pg->P.x) is
          assert (Nx < pg->P.x || pg->L2G0(0, gridSz(g.x)) % rs[k][j] == 0);
          rx[k][j+1] = rx[k][j] + nx*B;
          sz_u += nx*B;
        } //for(j...)
      } //for(k...)
      if (sz_u > 0)
        u = new double[sz_u];
    } //SparseGrid3D()

    ~SparseGrid3D() {
      if (useFullGrid && allocedUh) {
        delete uh;
        return;
      }
      if (u != 0)
        delete[] u;
      if (rs != 0) {
        for (int k=0; k < nz; k++)
          delete[] rs[k];
        delete[] rs;
      }
      if (rx != 0) {
        for (int k=0; k < nz; k++)
          delete[] rx[k]; // BUG FIX: was `delete rx[k]` on a new[]-array (UB)
        delete[] rx;
      }
      if (cs != 0)
        delete[] cs;
      if (ny != 0)
        delete[] ny;      // BUG FIX: was `delete ny` on a new[]-array (UB)
    } //~SparseGrid3D

    int numElts() {
      return (sz_u);
    }

    void zero() {
      if (useFullGrid) {
        uh->zero();
        return;
      }
#pragma omp parallel for default(shared)
      for (int i = 0; i < sz_u; i++) {
        u[i] = 0.0;
      }
    }

    /* in the following, we take advantage of 2 facts:
       1. every point in v must be represented in the s.g.
       2. v is aligned to the s.g. at offset i0
    */
    // simplified (hence optimized) case for 2D B=1
    void interpolate2DB1(double coeff, HaloArray3D *v, Vec3D<int> rV,
                         Vec3D<int> i0, Vec3D<int> n) {
#pragma omp parallel for default(shared)
      for (int j = 0; j < n.y; j++) {
        int jS = j+i0.y, sx = rs[0][jS];
        if (rx[0][jS+1] == rx[0][jS]) { // has no elements as rs[jS] > pg->P.x
          assert (sx > pg->P.x);
          continue;
        }
        // scale f.g. to s.g. indices; assumes L2G0(0, Nx) % rs[jS] == 0
        int xr0 = i0.x%sx, xOffs = xr0==0? 0: sx-xr0; // perform alignment
        int nx = (n.x - xOffs + sx-1) / sx;
        int i0x = (i0.x + sx-1) / sx;
        assert (nx == 0 || i0x + nx-1 <= rx[0][jS+1] - rx[0][jS]);
        double *uv = &u[rx[0][jS] + i0x];
        double y = (1.0 * j) / rV.y; int iy = (int) y;
        double ry = y - iy, ryc = 1.0 - ry;
        for (int i=0; i < nx; i++) { // only interpolate on points in the s.g.
          double x = (1.0 * (xOffs + i * sx)) / rV.x ;
          int ix = (int) x;
          double rx = x - ix, rxc = 1.0 - rx;
          double interpolant =
            rxc*ryc * *(v->ix_h(ix,   iy,   0)) +
            rx *ryc * *(v->ix_h(ix+1, iy,   0)) +
            rxc*ry  * *(v->ix_h(ix,   iy+1, 0)) +
            rx *ry  * *(v->ix_h(ix+1, iy+1, 0));
          uv[i] += coeff * interpolant;
        } //for(i...)
      } //for(j...)
    } //interpolate2DB1()

    // interpolate coeff * v, where resolution(v)*rV = resolution(s.g.),
    // into local s.g. elements i0..i0+dn-1
    void interpolate(double coeff, HaloArray3D *v, Vec3D<int> rV,
                     Vec3D<int> i0, Vec3D<int> n) {
      if (useFullGrid) {
        uh->interpolate(coeff, v, rV, i0, n);
        return;
      }
#ifndef NOOPT2D
      if (is2d && B==1) {
        interpolate2DB1(coeff, v, rV, i0, n);
        return;
      }
#endif
      for (int k = 0; k < n.z; k++) {
        int kS = k+i0.z;
        assert (kS < nz);
        int sy = cs[kS];
        // scale f.g. to s.g. indices; assumes L2G0(1, Ny) % sy == 0
        int yr0 = i0.y % sy, yOffs = yr0==0? 0: sy-yr0; // perform alignment
        int ny_ = (n.y - yOffs + sy-1) / sy;
        int i0y = (i0.y + sy-1) / sy;
        // BUG FIX: previous assert compared the member pointer `ny` against 0
        // instead of the local count `ny_`
        assert (ny_ == 0 || i0y + ny_-1 < ny[kS]);
        double z = (1.0 * k) / rV.z;
        int iz = (int) z;
        double rz = z - iz, rzc = 1.0 - rz;
#pragma omp parallel for default(shared)
        for (int j = 0; j < ny_; j++) {
          int jS = j+i0y;
          if (rx[kS][jS+1] == rx[kS][jS]) { // has no elts. as rs[kS][jS]>pg->P.x
            assert (rs[kS][jS] > pg->P.x);
            continue;
          }
          int sx = rs[kS][jS];
          // scale f.g. to s.g. indices; assumes L2G0(0, Nx) % sx == 0
          int xr0 = i0.x % sx, xOffs = xr0==0? 0: sx-xr0; // perform alignment
          int nx = (n.x - xOffs + sx-1) / sx;
          int i0x = (i0.x + sx-1) / sx;
          assert (nx == 0 || i0x + nx-1 < (rx[kS][jS+1] - rx[kS][jS])/B);
          double *uv = &u[rx[kS][jS] + i0x*B];
          double y = (1.0 * (yOffs + j * sy)) / rV.y;
          int iy = (int) y;
          double ry = y - iy, ryc = 1.0 - ry;
          for (int i=0; i < nx; i++) { // only interpolate on points in the s.g.
            double x = (1.0 * (xOffs + i * sx)) / rV.x ;
            int ix = (int) x;
            double rx = x - ix, rxc = 1.0 - rx;
            int iB = i*B, ixB = ix*B;
            for (int ib=0; ib < B; ib++) {
              double interpolant =
                rxc*ryc*rzc * *(v->ix_h(ixB+ib,   iy, iz)) +
                rx *ryc*rzc * *(v->ix_h(ixB+ib+B, iy, iz));
              if (rV.y > 1)
                interpolant +=
                  rxc*ry*rzc * *(v->ix_h(ixB+ib,   iy+1, iz)) +
                  rx *ry*rzc * *(v->ix_h(ixB+ib+B, iy+1, iz));
              if (rV.z > 1) {
                interpolant +=
                  rxc*ryc*rz * *(v->ix_h(ixB+ib,   iy, iz+1)) +
                  rx *ryc*rz * *(v->ix_h(ixB+B+ib, iy, iz+1));
                if (rV.y > 1)
                  interpolant +=
                    rxc*ry *rz * *(v->ix_h(ixB+ib,   iy+1, iz+1)) +
                    rx *ry *rz * *(v->ix_h(ixB+B+ib, iy+1, iz+1));
              }
              uv[iB+ib] += coeff * interpolant;
              // printf("kSjS=%d,%d sy, rc=%.0f v=%.0f x,y,z=%.1f,%.1f,%.1f \n",
              //        kS, jS, rxc*ryc*rzc, *(v->ix_h(ixB+ib, iy, iz)), x, y, z);
            }
          } //for(i...)
        } //for(j...)
      } //for(k...)
    } //interpolate()

    // sample s.g. elements i0:(i0+dn-1)*rV:rV into v,
    // where resolution(v)*rV = resolution(s.g.)
    // Returns a new[]-allocated buffer; the caller owns and must delete[] it.
    double *sample(Vec3D<int> i0, Vec3D<int> rV, Vec3D<int> n) {
      if (useFullGrid)
        return (uh->sample(i0, rV, n));
      double *buf = new double [n.prod()*B];
      assert(i0.z + (n.z-1)*rV.z < nz);
      for (int k=0; k < n.z; k++) {
        int kS = i0.z + k*rV.z;
        int sy = cs[kS];
        int jScale = rV.y / sy; assert(jScale > 0);
        int jS0 = (i0.y + sy-1) / sy;
        assert(jS0 + (n.y-1)*jScale < ny[kS]);
#pragma omp parallel for default(shared)
        for (int j=0; j < n.y; j++) {
          // BUG FIX: jS was a single shared counter incremented inside this
          // omp-parallel loop (a data race yielding wrong rows); derive the
          // row index from the loop variable instead.
          int jS = jS0 + j*jScale;
          double *v = &buf[(k*n.y + j) * n.x * B];
          int sx = rs[kS][jS];
          int iScale = (rV.x / sx) * B; assert(iScale > 0);
          int iS = ((i0.x + sx-1) / sx) * B;
          double *uv = &u[rx[kS][jS]];
          assert (iS + iScale*(n.x-1) < rx[kS][jS+1] - rx[kS][jS]);
          for (int i=0; i < n.x; i++, iS+=iScale) {
            for (int ib = 0; ib < B; ib++) {
              *v = uv[iS + ib];
              v++;
            }
          }
        } //for(j...)
      } //for(k...)
      return buf;
    } //sample()

    // debug dump of the local sparse grid contents for this rank
    void print(int rank, std::string label) {
      if (useFullGrid) {
        uh->print(rank, label);
        return;
      }
      if (rank < 0)
        return;
      for (int k=0; k < nz; k++) {
        if (label.c_str()[0] != 0)
          printf("%d: %s[%d]:\n", rank, label.c_str(), k);
        int Nx = gridSz(g.x), nx = pg->G2L(0, Nx);
        for (int j=0; j < ny[k]; j++) {
          printf("%d: %d", rank, rx[k][j]);
          double *v = &u[rx[k][j]];
          int iG = pg->L2G0(0, Nx);
          for (int i=0; i < nx; i++, iG++)
            for (int ib=0; ib < B; ib++) {
              if (iG % rs[k][j] == 0) {
                assert (v < &u[rx[k][j+1]]);
                const char *pad = *v < 10? " ": *v < 100? " ": "";
                printf("%s%3.1f ", pad, *v);
                v++;
              } else
                printf(" ");
            }
          printf("\n");
        }
        printf("\n");
      }
    } //print()
}; //SparseGrid3D
#endif /*SPARSEGRID3D_INCLUDED*/
|
tool_available_search.c | // RUN: %clang %flags -shared -fPIC %s -o %T/first_tool.so
// RUN: %clang %flags -DTOOL -DSECOND_TOOL -shared -fPIC %s -o %T/second_tool.so
// RUN: %clang %flags -DTOOL -DTHIRD_TOOL -shared -fPIC %s -o %T/third_tool.so
// RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/non_existing_file.so:%T/first_tool.so:%T/second_tool.so:%T/third_tool.so %libomp-run | FileCheck %s
// REQUIRES: ompt
/*
* This file contains code for three OMPT shared library tool to be
* loaded and the code for the OpenMP executable.
* No option enables code for the first shared library
* (without an implementation of ompt_start_tool) during compilation
* -DTOOL -DSECOND_TOOL enables the code for the second tool during compilation
* -DTOOL -DTHIRD_TOOL enables the code for the third tool during compilation
* -DCODE enables the code for the executable during compilation
*/
#ifdef CODE
#include "stdio.h"
#include "omp.h"
#include "ompt.h"
/* Spawns a two-thread parallel region; the master thread calls
 * omp_control_tool, whose -1 result confirms the loaded tool does not
 * handle control commands. The CHECK lines below are FileCheck patterns
 * matched against the program's stdout and must not be altered. */
int main()
{
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
int result = omp_control_tool(omp_control_tool_start, 0, NULL);
printf("0: control_tool()=%d\n", result);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback
// CHECK: {{^}}0: Do not initialize tool
// CHECK: {{^}}0: Do initialize tool
// CHECK: {{^}}0: Tool initialized
// CHECK: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: control_tool()=-1
// CHECK: {{^}}0: Tool finalized
return 0;
}
#ifdef TOOL
#include <ompt.h>
#include "stdio.h"
#ifdef SECOND_TOOL
// The second tool has an implementation of ompt_start_tool that returns NULL
/* Returning NULL tells the OpenMP runtime this tool declines activation,
 * so the runtime keeps scanning OMP_TOOL_LIBRARIES for the next candidate. */
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
printf("0: Do not initialize tool\n");
return NULL;
}
#elif defined(THIRD_TOOL)
// The third tool has an implementation of ompt_start_tool that returns a
// pointer to a valid instance of ompt_start_tool_result_t
/* OMPT thread-begin callback: prints a marker line that the FileCheck
 * patterns in main() match against. */
static void
on_ompt_callback_thread_begin(
ompt_thread_t thread_type,
ompt_data_t *thread_data)
{
printf("0: ompt_event_thread_begin\n");
}
/* Tool initializer invoked by the runtime after ompt_start_tool returned a
 * non-NULL result; registers the thread-begin callback via the lookup table. */
int ompt_initialize(
ompt_function_lookup_t lookup,
ompt_data_t *tool_data)
{
ompt_set_callback_t ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_set_callback(ompt_callback_thread_begin, (ompt_callback_t)on_ompt_callback_thread_begin);
printf("0: Tool initialized\n");
/* non-zero return keeps the tool active for this runtime instance */
return 1;
}
/* Tool finalizer: called once when the OpenMP runtime shuts down. */
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: Tool finalized\n");
}
/* Entry point scanned by the runtime: returning a valid result object makes
 * this the tool that gets initialized (the earlier libraries in
 * OMP_TOOL_LIBRARIES were missing or declined). */
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
printf("0: Do initialize tool\n");
/* fields: initialize callback, finalize callback, tool_data (zeroed) */
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
#endif
#endif /* TOOL */
|
openmp2.c | #include <stdio.h>
#include <omp.h>
#define SIZE 20000
// export OMP_NUM_THREADS=400
/* Fills a and b, then computes c[j] = a[j] + b[j] in parallel using a
 * dynamically scheduled OpenMP for loop. */
int main(int argc, char const *argv[]) {
  int a[SIZE];
  int b[SIZE];
  int c[SIZE];
  int chunk = 100;

  for (size_t i = 0; i < SIZE; i++) {
    a[i] = i;
    b[i] = 2 * i;
  }

  /* BUG FIX: the opening brace was on the same line as the #pragma; a
   * #pragma directive consumes the whole line, so the `{` became part of
   * the directive text and the parallel construct had no structured block
   * (a compile error). The brace must start on the following line. */
#pragma omp parallel shared(a, b, c, chunk)
  {
    /* the loop variable of an omp-for construct is implicitly private */
#pragma omp for schedule(dynamic, chunk) nowait
    for (int j = 0; j < SIZE; j++) {
      c[j] = a[j] + b[j];
    }
  }
  return 0;
}
|
random.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2018 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file random.h
* \brief Randomness-related functions
*/
#ifndef RANDOM_H_
#define RANDOM_H_
namespace qpp {
/**
* \brief Generates a random real number uniformly distributed in
* the interval [a, b)
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random real number (double) uniformly distributed in
* the interval [a, b)
*/
inline double rand(double a, double b) {
    // a must lie strictly below b for [a, b) to be a valid interval
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");

    std::uniform_real_distribution<> dist(a, b);
#ifdef NO_THREAD_LOCAL_
    auto& engine = RandomDevices::get_instance().get_prng();
#else
    auto& engine = RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    return dist(engine);
}
/**
* \brief Generates a random big integer uniformly distributed in
* the interval [a, b]
*
* \note To avoid ambiguity with double qpp::rand(double, double) cast at
* least one of the arguments to qpp::bigint
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random big integer uniformly distributed in the interval [a, b]
*/
inline bigint rand(bigint a, bigint b) {
    // the closed interval [a, b] must be non-empty
    if (a > b)
        throw exception::OutOfRange("qpp::rand()");

    std::uniform_int_distribution<bigint> dist(a, b);
#ifdef NO_THREAD_LOCAL_
    auto& engine = RandomDevices::get_instance().get_prng();
#else
    auto& engine = RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    return dist(engine);
}
/**
* \brief Generates a random index (idx) uniformly distributed in
* the interval [a, b]
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random index (idx) uniformly distributed in the interval [a, b]
*/
inline idx randidx(idx a = std::numeric_limits<idx>::min(),
                   idx b = std::numeric_limits<idx>::max()) {
    // the closed interval [a, b] must be non-empty
    if (a > b)
        throw exception::OutOfRange("qpp::randidx()");

    std::uniform_int_distribution<idx> dist(a, b);
#ifdef NO_THREAD_LOCAL_
    auto& engine = RandomDevices::get_instance().get_prng();
#else
    auto& engine = RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    return dist(engine);
}
/**
* \brief Generates a random matrix with entries uniformly
* distributed in the interval [a, b)
*
* If complex, then both real and imaginary parts are uniformly distributed
* in [a, b)
*
* This is the generic version that always throws
* qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for
* qpp::dmat and qpp::cmat
*/
template <typename Derived>
Derived rand(idx rows, idx cols, double a = 0, double b = 1) {
    // generic fallback: only the qpp::dmat / qpp::cmat specializations are
    // implemented, so any other Derived type is an error.
    // (void)-casts silence -Wunused-parameter in clang++
    (void) rows, (void) cols, (void) a, (void) b;
    throw exception::UndefinedType("qpp::rand()");
}
/**
 * \brief Generates a random real matrix with entries uniformly
 * distributed in the interval [a, b),
 * specialization for double matrices (qpp::dmat)
 *
 * The template parameter cannot be automatically deduced and
 * must be explicitly provided
 *
 * Example:
 * \code
 * // generates a 3 x 3 random Eigen::MatrixXd,
 * // with entries uniformly distributed in [-1,1)
 * dmat mat = rand<dmat>(3, 3, -1, 1);
 * \endcode
 *
 * \param rows Number of rows of the random generated matrix
 * \param cols Number of columns of the random generated matrix
 * \param a Beginning of the interval, belongs to it
 * \param b End of the interval, does not belong to it
 * \return Random real matrix
 */
template <>
inline dmat rand(idx rows, idx cols, double a, double b) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::rand()");
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // fill element-wise with scalar uniform samples
    auto sample = [a, b](double) { return rand(a, b); };
    return dmat::Zero(rows, cols).unaryExpr(sample);
}
/**
 * \brief Generates a random complex matrix with entries (both real and
 * imaginary) uniformly distributed in the interval [a, b),
 * specialization for complex matrices (qpp::cmat)
 *
 * The template parameter cannot be automatically deduced and
 * must be explicitly provided
 *
 * Example:
 * \code
 * // generates a 3 x 3 random Eigen::MatrixXcd,
 * // with entries (both real and imaginary) uniformly distributed in [-1,1)
 * cmat mat = rand<cmat>(3, 3, -1, 1);
 * \endcode
 *
 * \param rows Number of rows of the random generated matrix
 * \param cols Number of columns of the random generated matrix
 * \param a Beginning of the interval, belongs to it
 * \param b End of the interval, does not belong to it
 * \return Random complex matrix
 */
template <>
inline cmat rand(idx rows, idx cols, double a, double b) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::rand()");
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // independent uniform samples for the real and imaginary parts
    cmat re = rand<dmat>(rows, cols, a, b).cast<cplx>();
    cmat im = rand<dmat>(rows, cols, a, b).cast<cplx>();
    return re + 1_i * im;
}
/**
 * \brief Generates a random matrix with entries normally
 * distributed in N(mean, sigma)
 *
 * If complex, then both real and imaginary parts are normally distributed
 * in N(mean, sigma)
 *
 * This is the generic version that always throws
 * qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for
 * qpp::dmat and qpp::cmat
 */
template <typename Derived>
Derived randn(idx rows, idx cols, double mean = 0, double sigma = 1) {
    // all parameters are intentionally unused here; silence
    // -Wunused-parameter in clang++
    (void) rows, (void) cols, (void) mean, (void) sigma;
    throw exception::UndefinedType("qpp::randn()");
}
/**
 * \brief Generates a random real matrix with entries normally
 * distributed in N(mean, sigma),
 * specialization for double matrices (qpp::dmat)
 *
 * The template parameter cannot be automatically deduced and
 * must be explicitly provided
 *
 * Example:
 * \code
 * // generates a 3 x 3 random Eigen::MatrixXd,
 * // with entries normally distributed in N(0,2)
 * dmat mat = randn<dmat>(3, 3, 0, 2);
 * \endcode
 *
 * \param rows Number of rows of the random generated matrix
 * \param cols Number of columns of the random generated matrix
 * \param mean Mean
 * \param sigma Standard deviation
 * \return Random real matrix
 */
template <>
inline dmat randn(idx rows, idx cols, double mean, double sigma) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::randn()");
    // END EXCEPTION CHECKS

    std::normal_distribution<> nd(mean, sigma);
    // one shared distribution object, sampled once per matrix entry
    auto sample = [&nd](double) {
#ifdef NO_THREAD_LOCAL_
        return nd(RandomDevices::get_instance().get_prng());
#else
        return nd(RandomDevices::get_thread_local_instance().get_prng());
#endif // NO_THREAD_LOCAL_
    };
    return dmat::Zero(rows, cols).unaryExpr(sample);
}
/**
 * \brief Generates a random complex matrix with entries (both real and
 * imaginary) normally distributed in N(mean, sigma),
 * specialization for complex matrices (qpp::cmat)
 *
 * The template parameter cannot be automatically deduced and
 * must be explicitly provided
 *
 * Example:
 * \code
 * // generates a 3 x 3 random Eigen::MatrixXcd,
 * // with entries (both real and imaginary) normally distributed in N(0,2)
 * cmat mat = randn<cmat>(3, 3, 0, 2);
 * \endcode
 *
 * \param rows Number of rows of the random generated matrix
 * \param cols Number of columns of the random generated matrix
 * \param mean Mean
 * \param sigma Standard deviation
 * \return Random complex matrix
 */
template <>
inline cmat randn(idx rows, idx cols, double mean, double sigma) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::randn()");
    // END EXCEPTION CHECKS

    // independent Gaussian samples for the real and imaginary parts
    cmat re = randn<dmat>(rows, cols, mean, sigma).cast<cplx>();
    cmat im = randn<dmat>(rows, cols, mean, sigma).cast<cplx>();
    return re + 1_i * im;
}
/**
 * \brief Generates a random real number (double) normally distributed in
 * N(mean, sigma)
 *
 * \param mean Mean
 * \param sigma Standard deviation
 * \return Random real number normally distributed in N(mean, sigma)
 */
inline double randn(double mean = 0, double sigma = 1) {
    std::normal_distribution<> nd(mean, sigma);
#ifdef NO_THREAD_LOCAL_
    auto& prng = RandomDevices::get_instance().get_prng();
#else
    auto& prng = RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    return nd(prng);
}
/**
 * \brief Generates a random unitary matrix
 *
 * Builds a Gaussian (complex-normal) matrix, takes the Q factor of its QR
 * decomposition, and then multiplies each column by a random phase.
 *
 * \param D Dimension of the Hilbert space
 * \return Random unitary
 */
inline cmat randU(idx D = 2)
// ~3 times slower than Toby Cubitt's MATLAB corresponding routine,
// because Eigen 3 QR algorithm is not parallelized
{
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randU()");
    // END EXCEPTION CHECKS

    // D x D matrix of complex Gaussian entries, scaled by 1/sqrt(2)
    cmat X = 1 / std::sqrt(2.) * randn<cmat>(D, D);
    Eigen::HouseholderQR<cmat> qr(X);
    cmat Q = qr.householderQ();
    // phase correction so that the resultant matrix is
    // uniformly distributed according to the Haar measure
    // phases(i) = exp(2*pi*i*u) with u uniform in [0,1)
    Eigen::VectorXcd phases = (rand<dmat>(D, 1)).cast<cplx>();
    for (idx i = 0; i < static_cast<idx>(phases.rows()); ++i)
        phases(i) = std::exp(2 * pi * 1_i * phases(i));
    // scale each column of Q by its phase
    Q = Q * phases.asDiagonal();
    return Q;
}
/**
 * \brief Generates a random isometry matrix
 *
 * \param Din Size of the input Hilbert space
 * \param Dout Size of the output Hilbert space
 * \return Random isometry matrix
 */
inline cmat randV(idx Din, idx Dout) {
    // EXCEPTION CHECKS
    if (Din == 0 || Dout == 0 || Din > Dout)
        throw exception::DimsInvalid("qpp::randV()");
    // END EXCEPTION CHECKS

    // the first Din columns of a random Dout x Dout unitary form an isometry
    cmat U = randU(Dout);
    return U.block(0, 0, Dout, Din);
}
/**
 * \brief Generates a set of random Kraus operators
 *
 * Extracts the N Kraus operators from the entries of a random
 * (N*D) x (N*D) unitary matrix; unitarity yields the closure condition.
 *
 * \note The set of Kraus operators satisfy the closure condition
 * \f$ \sum_i K_i^\dagger K_i = I\f$
 *
 * \param N Number of Kraus operators
 * \param D Dimension of the Hilbert space
 * \return Set of \a N Kraus operators satisfying the closure condition
 */
inline std::vector<cmat> randkraus(idx N, idx D = 2) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::OutOfRange("qpp::randkraus()");
    if (D == 0)
        throw exception::DimsInvalid("qpp::randkraus()");
    // END EXCEPTION CHECKS

    std::vector<cmat> result(N);
    for (idx i = 0; i < N; ++i)
        result[i] = cmat::Zero(D, D);

    // NOTE: removed unused local `cmat Fk(D, D);` (dead variable)
    cmat U = randU(N * D);
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(3)
#endif // WITH_OPENMP_
    // each (k, a, b) writes a distinct element, so the loops are race-free
    for (idx k = 0; k < N; ++k)
        for (idx a = 0; a < D; ++a)
            for (idx b = 0; b < D; ++b)
                result[k](a, b) = U(a * N + k, b * N);
    return result;
}
/**
 * \brief Generates a random Hermitian matrix
 *
 * \param D Dimension of the Hilbert space
 * \return Random Hermitian matrix
 */
inline cmat randH(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randH()");
    // END EXCEPTION CHECKS

    // random complex matrix with real/imaginary parts shifted into [-1, 1)
    cmat A = 2 * rand<cmat>(D, D) - (1. + 1_i) * cmat::Ones(D, D);
    // A + A^dagger is Hermitian by construction
    return A + adjoint(A);
}
/**
 * \brief Generates a random normalized ket (pure state vector)
 *
 * \param D Dimension of the Hilbert space
 * \return Random normalized ket
 */
inline ket randket(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randket()");
    // END EXCEPTION CHECKS

    // a Gaussian random vector normalized to unit length
    // (faster than applying a random unitary to a fixed ket)
    ket psi = randn<cmat>(D, 1);
    return psi / norm(psi);
}
/**
 * \brief Generates a random density matrix
 *
 * \param D Dimension of the Hilbert space
 * \return Random density matrix
 */
inline cmat randrho(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randrho()");
    // END EXCEPTION CHECKS

    cmat H = 10 * randH(D);
    // H * H^dagger is positive semidefinite; normalize to unit trace
    cmat rho = H * adjoint(H);
    return rho / trace(rho);
}
/**
 * \brief Generates a random uniformly distributed permutation
 *
 * Uses Knuth shuffle method (as implemented by std::shuffle),
 * so that all permutations are equally probable
 *
 * \param N Size of the permutation
 * \return Random permutation of size \a N
 */
inline std::vector<idx> randperm(idx N) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::PermInvalid("qpp::randperm()");
    // END EXCEPTION CHECKS

    // identity permutation 0, 1, ..., N-1
    std::vector<idx> perm(N);
    std::iota(perm.begin(), perm.end(), 0);
#ifdef NO_THREAD_LOCAL_
    auto& prng = RandomDevices::get_instance().get_prng();
#else
    auto& prng = RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    std::shuffle(perm.begin(), perm.end(), prng);
    return perm;
}
/**
 * \brief Generates a random probability vector uniformly distributed over the
 * probability simplex
 *
 * \param N Size of the probability vector
 * \return Random probability vector
 */
inline std::vector<double> randprob(idx N) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::OutOfRange("qpp::randprob()");
    // END EXCEPTION CHECKS

    std::vector<double> result(N);
    // exponential samples normalized to unit sum lie uniformly on the simplex
    std::exponential_distribution<> ed(1);
#ifdef NO_THREAD_LOCAL_
    auto& prng = qpp::RandomDevices::get_instance().get_prng();
#else
    auto& prng = qpp::RandomDevices::get_thread_local_instance().get_prng();
#endif // NO_THREAD_LOCAL_
    for (auto& elem : result)
        elem = ed(prng);
    // normalize
    double total = sum(result);
    for (auto& elem : result)
        elem /= total;
    return result;
}
} /* namespace qpp */
#endif /* RANDOM_H_ */
|
draw-private.h | /*
Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore private image drawing methods.
*/
#ifndef _MAGICKCORE_DRAW_PRIVATE_H
#define _MAGICKCORE_DRAW_PRIVATE_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/memory_.h"
/*
  GetFillColor() stores in *pixel the fill color to use at position (x,y):
  the constant fill color when no fill pattern is set, otherwise the pixel
  of the fill-pattern image tiled across the canvas.  Returns MagickTrue on
  success; otherwise the status of the virtual pixel lookup.
*/
static inline MagickBooleanType GetFillColor(const DrawInfo *draw_info,
  const long x,const long y,PixelPacket *pixel)
{
  Image
    *pattern;

  MagickBooleanType
    status;

  pattern=draw_info->fill_pattern;
  if (pattern == (Image *) NULL)
    {
      /* no pattern image: use the solid fill color */
      *pixel=draw_info->fill;
      return(MagickTrue);
    }
  /*
    NOTE(review): the critical section serializes the virtual-pixel lookup,
    presumably because the shared pattern image's cache/exception state is
    not thread-safe -- confirm.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(pattern,TileVirtualPixelMethod,
    x+pattern->tile_offset.x,y+pattern->tile_offset.y,pixel,
    &pattern->exception);
  /* pattern image carries no alpha channel: force an opaque result */
  if (pattern->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;
  return(status);
}
/*
  GetStrokeColor() stores in *pixel the stroke color to use at position (x,y):
  the constant stroke color when no stroke pattern is set, otherwise the pixel
  of the stroke-pattern image tiled across the canvas.  Returns MagickTrue on
  success; otherwise the status of the virtual pixel lookup.
*/
static inline MagickBooleanType GetStrokeColor(const DrawInfo *draw_info,
  const long x,const long y,PixelPacket *pixel)
{
  Image
    *pattern;

  MagickBooleanType
    status;

  pattern=draw_info->stroke_pattern;
  if (pattern == (Image *) NULL)
    {
      /* no pattern image: use the solid stroke color */
      *pixel=draw_info->stroke;
      return(MagickTrue);
    }
  /*
    NOTE(review): the critical section serializes the virtual-pixel lookup,
    presumably because the shared pattern image's cache/exception state is
    not thread-safe -- confirm.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(pattern,TileVirtualPixelMethod,
    x+pattern->tile_offset.x,y+pattern->tile_offset.y,pixel,
    &pattern->exception);
  /* pattern image carries no alpha channel: force an opaque result */
  if (pattern->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;
  return(status);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
md-core.c | /******************************************************************************/
/* */
/* Molecular dynamics */
/* ================== */
/* */
/* Copyright (c) 2017 Ruben Felgenhauer, Leonhard Reichenbach */
/* */
/* This file is released under the MIT license. */
/* */
/******************************************************************************/
#include "md-core.h"
/**
 * Seeds the drand48() generator from the current wall-clock time with
 * microsecond resolution.
 */
void seed() {
  struct timeval now;
  gettimeofday(&now, NULL);
  unsigned long long micros =
      (unsigned long long)now.tv_sec * 1000000 + now.tv_usec;
  srand48(micros);
}
/**
* The Lennard Jones potential between two molecules
* @param r the distance between the two molecules
* @return the resulting potential
*/
static double lennjo_potential(double r) {
return 4 * (1/pow(r,12) - 1/pow(r,6));
}
/**
* The force that gets induced by the Lennard jones potential
* @param r the distance between the two molecules
* @return The resulting force
*/
static double lennjo_force(double r) {
return 24 * (2/pow(r,13) - 1/pow(r,7));
}
/**
 * Builds a vector from the given coordinates; the returned Vector also
 * carries the Euclidean length of (x, y).
 * @param x the x coordinate
 * @param y the y coordinate
 * @return the resulting vector
 */
static Vector build_vector(double x, double y) {
  Vector v;
  v.coordinates.x = x;
  v.coordinates.y = y;
  v.length = sqrt(x * x + y * y);
  return v;
}
/**
 * Determines the distance vector between two molecule positions.
 * @param first the first position
 * @param second the second position
 * @param params the simulation parameters; uses params->L (side length of
 *        the square box) and params->periodic_boundaries (whether to apply
 *        the minimum-image convention)
 * @return the distance vector, pointing from first to second
 */
static Vector distance(const Coordinates *first, const Coordinates *second,
                       SimParams *params) {
  double dx = second->x - first->x;
  double dy = second->y - first->y;
  if (params->periodic_boundaries) {
    double L = params->L;
    /* minimum-image convention: any component larger than half the box is
       replaced by the shorter wrapped-around distance */
    if (fabs(dx) > L / 2) {
      dx = dx - copysign(1.0, dx) * L;
    }
    if (fabs(dy) > L / 2) {
      dy = dy - copysign(1.0, dy) * L;
    }
  }
  return build_vector(dx, dy);
}
/**
 * Gathers the kinetic energy, potential energy, total energy and temperature
 * of the system and writes that data back into their corresponding members
 * of data.
 * @param data The simulation data
 * @param params The simulation parameters
 */
void get_statistics(SimData *data, SimParams *params) {
  double Epot = 0.0;
  double Ekin = 0.0;
  for (uint64_t i = 0; i < params->N; i++) {
    /* potential energy: sum over unordered pairs (i, j) with j > i */
    for (uint64_t j = i + 1; j < params->N; j++) {
      Vector dist = distance(&(data->molecules[i].curr_pos),
                             &(data->molecules[j].curr_pos), params);
      /* dist_threshold == 0.0 means "no cutoff" */
      if (params->dist_threshold == 0.0 || dist.length <= params->dist_threshold) {
        Epot += lennjo_potential(dist.length);
      }
    }
    /* kinetic energy: v^2 / 2 per molecule (unit mass) */
    Ekin += (data->molecules[i].vel.x * data->molecules[i].vel.x
             + data->molecules[i].vel.y * data->molecules[i].vel.y) / 2;
  }
  data->kinetic_energy = Ekin;
  data->potential_energy = Epot;
  data->energy = Ekin + Epot;
  /* T = Ekin / N in reduced units -- NOTE(review): presumably k_B = 1 and
     the 2D equipartition factor is absorbed; confirm intended convention */
  data->temperature = Ekin / params->N;
}
/**
 * Initializes the molecules array.
 *
 * Places params->N molecules on a ceil(sqrt(N)) x ceil(sqrt(N)) grid covering
 * the box, each jittered by a bounded uniform random offset (scaled by
 * params->max_deviation) and given a uniform random start velocity (scaled by
 * params->max_v0).  The previous position is back-extrapolated from the
 * velocity, as required by the Verlet update in apply_forces().
 *
 * @param params the simulation parameters to use
 * @return the molecules (heap-allocated, params->N entries; freed by finalize())
 */
static Molecule *init_molecules(SimParams *params) {
  Molecule *molecules = malloc(params->N * sizeof(Molecule));
  ASSERT(molecules, "Could not initialize molecules");
  /* smallest square grid that fits all N molecules */
  uint64_t grid_size = ceil(sqrt(params->N));
  double grid_dist = params->L / ((double)grid_size);
  for (uint64_t i = 0; i < params->N; i++) {
    /* grid-cell center plus jitter; drand48() is uniform in [0, 1) */
    double x_curr = ((((double)(i % grid_size)) + 0.5)
        + params->max_deviation * (drand48() - 0.5) * sqrt(2)) * grid_dist;
    double y_curr = ((((double)(i / grid_size)) + 0.5)
        + params->max_deviation * (drand48() - 0.5) * sqrt(2)) * grid_dist;
    double v_x = params->max_v0 * (drand48() - 0.5) * sqrt(2);
    double v_y = params->max_v0 * (drand48() - 0.5) * sqrt(2);
    /* previous position consistent with the initial velocity */
    double x_prev = x_curr - v_x * params->timestep;
    double y_prev = y_curr - v_y * params->timestep;
    Molecule molecule;
    molecule.curr_pos.x = x_curr;
    molecule.curr_pos.y = y_curr;
    molecule.prev_pos.x = x_prev;
    molecule.prev_pos.y = y_prev;
    molecule.vel.x = v_x;
    molecule.vel.y = v_y;
    molecules[i] = molecule;
  }
  return molecules;
}
/**
 * Fills data->forces_buffer with the pairwise Lennard-Jones force
 * contributions for the current positions.  For each unordered pair (i, j),
 * i < j, the force components are written to forces_buffer[i][j] and, negated,
 * to forces_buffer[j][i] (Newton's third law).  Pairs at zero distance or
 * beyond the cutoff (params->dist_threshold; 0.0 == "no cutoff") contribute
 * zero force.
 *
 * NOTE(review): assumes params->N >= 1 -- with N == 0 the uint64_t bound
 * N-1 underflows.  The `omp for` only parallelizes when called from inside
 * an OpenMP parallel region (see step()).
 */
void calculate_forces(SimData *data, SimParams *params){
  #pragma omp for simd
  for (uint64_t i = 0; i < params->N-1; i++) {
    for (uint64_t j = i + 1; j < params->N; j++) {
      Vector dist = distance(&(data->molecules[i].curr_pos),
                             &(data->molecules[j].curr_pos),
                             params);
      Coordinates *force_ij = &(data->forces_buffer[i][j]);
      Coordinates *force_ji = &(data->forces_buffer[j][i]);
      if (dist.length > 0.0 && (params->dist_threshold == 0.0
          || dist.length <= params->dist_threshold)) {
        /* project the scalar force onto the x/y components */
        double force = lennjo_force(dist.length);
        double force_x = force * dist.coordinates.x / dist.length;
        double force_y = force * dist.coordinates.y / dist.length;
        force_ij->x = force_x;
        force_ij->y = force_y;
        force_ji->x = -force_x;
        force_ji->y = -force_y;
      } else {
        force_ij->x = 0.0;
        force_ij->y = 0.0;
        force_ji->x = 0.0;
        force_ji->y = 0.0;
      }
    }
  }
}
/**
 * Calculates the total force acting on particle i by summing all pairwise
 * contributions F_{j,i} stored in the forces buffer.
 * @param data the simulation data
 * @param params the simulation params
 * @param i The index of the particle to get the force for.
 * @return the resulting force.
 */
static Coordinates get_total_force(SimData *data, SimParams *params, uint64_t i) {
  Coordinates total = {.x = 0.0, .y = 0.0};
  for (uint64_t src = 0; src < params->N; src++) {
    Coordinates *contribution = &(data->forces_buffer[src][i]);
    total.x += contribution->x;
    total.y += contribution->y;
  }
  return total;
}
/**
 * Corrects a position so that 0 <= position <= L.
 * @param curr the current position that is going to be positioned the same way
 *     relatively to next that it was before the correction was applied. This
 *     might include putting curr on the other side of next with the same
 *     distance if the direction of movement is being flipped during
 *     elastic reflection (if periodic_boundaries == false).
 * @param next the next position (the one to correct). If periodic_boundaries,
 *     next will be teleported to the other side of the lattice. Otherwise,
 *     it will be reflected elastically.
 * @param params the simulation params, including L and periodic_boundaries.
 */
static void fix_boundaries(double *curr, double *next, SimParams *params) {
  double L = params->L;
  if (*next > L || *next < 0) {
    // We'll need this to correct the current position later
    double curr_to_next = *next - *curr;
    if (params->periodic_boundaries) {
      if (*next > L) {
        // fmod should return a value between 0.0 and L here.
        *next = fmod(*next, L);
      } else {
        // fmod should return a value between -L and 0.0 here, so position should
        // mathematically be between 0.0 and and L, but numerical errors do happen.
        *next = L + fmod(*next, L);
        // The >= is there to catch numerical errors. The distance to L should be
        // at the order of magnitude of machine accuracy. Position can not be greater
        // than L mathematically at this point.
        if (*next >= L) {
          *next = 0.0;
        }
      }
    } else { /* Reflective walls */
      // Distance from curr to the wall that was crossed first
      double dist_to_border = *next > L ? L - *curr : *curr;
      // The distance that remains after the first collision
      double rem_initial = fabs(curr_to_next) - dist_to_border;
      // The distance that remains after the last collision, if there is going
      // to be more than one, according to the velocity.
      double rem_final = fmod(rem_initial, L);
      // We'll need to turn the direction of movement around, if the particle
      // reflects an odd amount of times with any walls.
      bool keep_direction = (bool)((int)((rem_initial - rem_final) / L) % 2);
      // Determine at which border the particle is going to be placed
      if (keep_direction == (*next > L)) {
        *next = rem_final;
      } else {
        *next = L - rem_final;
      }
      // Determine on which side curr is going to be placed
      if (!keep_direction) {
        curr_to_next *= -1;
      }
    }
    // Correct curr so that the velocity estimate in apply_forces() stays
    // consistent with the (possibly wrapped/reflected) new position.
    *curr = *next - curr_to_next;
  }
}
/**
 * Advances all molecules by one time step with the Verlet scheme
 * x(t+dt) = 2 x(t) - x(t-dt) + F dt^2 (unit mass), estimates the velocity via
 * the central difference (x(t+dt) - x(t-dt)) / (2 dt), applies the boundary
 * handling, and commits the new positions.
 *
 * NOTE(review): the `omp for` only parallelizes when called from inside an
 * OpenMP parallel region (see step()).
 */
void apply_forces(SimData *data, SimParams *params){
  double squared_timestep = params->timestep * params->timestep;
  #pragma omp for simd
  for(uint64_t i = 0; i < params->N; i++) {
    Coordinates force = get_total_force(data, params, i);
    Coordinates *prev_pos = &(data->molecules[i].prev_pos);
    Coordinates *curr_pos = &(data->molecules[i].curr_pos);
    /* Verlet position update */
    Coordinates next_pos = {.x = 2 * curr_pos->x - prev_pos->x
                                 + force.x * squared_timestep,
                            .y = 2 * curr_pos->y - prev_pos->y
                                 + force.y * squared_timestep};
    /* central-difference velocity estimate */
    data->molecules[i].vel.x = (next_pos.x - prev_pos->x)
                               / (2 * params->timestep);
    data->molecules[i].vel.y = (next_pos.y - prev_pos->y)
                               / (2 * params->timestep);
    /* wrap (periodic) or reflect (walls); may also adjust curr_pos */
    fix_boundaries(&(curr_pos->x), &(next_pos.x), params);
    fix_boundaries(&(curr_pos->y), &(next_pos.y), params);
    *prev_pos = *curr_pos;
    *curr_pos = next_pos;
  }
}
/**
 * Activates the heat bath: over the given number of steps the molecule
 * velocities are rescaled towards the target temperature (see
 * apply_heat_bath()).
 * @param data the simulation data to attach the heat bath to
 * @param target_temperature the temperature to drive the system towards
 * @param steps how many steps the heat bath stays active (must be > 0)
 */
void enable_heat_bath(SimData *data, double target_temperature, uint64_t steps) {
  ASSERT(steps, "Steps must be greater than 0");
  const double alpha_tilde = 3.0;
  HeatBath bath;
  bath.alpha_tilde = alpha_tilde;
  /* precomputed denominator for the per-step ramp in apply_heat_bath() */
  bath.steps_to_the_alpha_tilde = pow(steps, alpha_tilde);
  bath.steps = steps;
  bath.current_step = 1;
  bath.target_temperature = target_temperature;
  data->heat_bath = bath;
  data->heat_bath_enabled = true;
}
/**
 * Scales one velocity component by the given factor and moves the previous
 * position so that the implicit Verlet velocity (curr - prev) matches the
 * scaled value.  curr_pos itself is left unchanged.
 */
void multiply_velocity(double *curr_pos, double *prev_pos, double *vel, double factor) {
  double displacement = *curr_pos - *prev_pos;
  *prev_pos = *curr_pos - displacement * factor;
  *vel *= factor;
}
/**
 * Performs one heat-bath step: rescales every molecule's velocity by
 * (T_target / T_current)^((current_step / steps)^alpha_tilde), a factor that
 * ramps towards the full correction as current_step approaches steps.
 * Disables the bath once all configured steps have been applied.
 */
void apply_heat_bath(SimData *data, SimParams *params) {
  HeatBath *bath = &(data->heat_bath);
  /* ramp exponent in [0, 1]: (current_step / steps)^alpha_tilde */
  double ramp = pow(bath->current_step, bath->alpha_tilde)
                / bath->steps_to_the_alpha_tilde;
  double temperature_ratio = bath->target_temperature / data->temperature;
  double scale = pow(temperature_ratio, ramp);
  for (uint64_t i = 0; i < params->N; i++) {
    Molecule *m = &(data->molecules[i]);
    multiply_velocity(&(m->curr_pos.x), &(m->prev_pos.x), &(m->vel.x), scale);
    multiply_velocity(&(m->curr_pos.y), &(m->prev_pos.y), &(m->vel.y), scale);
  }
  if (++(bath->current_step) > bath->steps) {
    data->heat_bath_enabled = false;
  }
}
/**
 * Performs one full simulation step: force calculation, Verlet integration,
 * statistics gathering, and (if enabled) one heat-bath rescaling.
 *
 * NOTE(review): `#pragma omp parallel` applies only to the next statement,
 * i.e. the calculate_forces() call; apply_forces()'s orphaned `omp for`
 * therefore runs sequentially.  Confirm whether a braced parallel region
 * spanning both calls was intended.
 */
void step(SimData *data, SimParams *params) {
  #pragma omp parallel
  calculate_forces(data, params);
  apply_forces(data, params);
  get_statistics(data, params);
  if (data->heat_bath_enabled) {
    apply_heat_bath(data, params);
  }
}
/**
 * Allocates and initializes the complete simulation state.
 * @param params the simulation parameters
 * @return the initialized simulation data; release with finalize()
 */
SimData init_data(SimParams *params) {
  SimData result;
  result.molecules = init_molecules(params);
  /* N x N matrix of pairwise force contributions; forces_buffer[j][i] is
     summed by get_total_force().  Only the diagonal (self-interaction = 0)
     is set here -- calculate_forces() fills the off-diagonals every step. */
  result.forces_buffer = malloc(sizeof(Coordinates*) * params->N);
  ASSERT(result.forces_buffer, "Could not allocate memory for forces buffer");
  for (uint64_t i = 0; i < params->N; i++) {
    result.forces_buffer[i] = malloc(sizeof(Coordinates) * params->N);
    ASSERT(result.forces_buffer[i], "Could not allocate memory for forces buffer");
    result.forces_buffer[i][i].x = 0.0;
    result.forces_buffer[i][i].y = 0.0;
  }
  result.heat_bath_enabled = false;
  return result;
}
/**
 * Builds the simulation parameter struct from the individual settings and
 * configures the OpenMP thread count.
 * @return the populated parameter struct (passed by value)
 */
SimParams init_params(uint64_t number_of_threads, uint64_t N,
                      double L, double timestep, double max_deviation,
                      double max_v0, bool periodic_boundaries, double dist_threshold,
                      uint64_t iterations) {
  SimParams params;
  params.N = N;
  params.L = L;
  params.number_of_threads = number_of_threads;
  params.timestep = timestep;
  params.max_deviation = max_deviation;
  params.max_v0 = max_v0;
  params.periodic_boundaries = periodic_boundaries;
  params.dist_threshold = dist_threshold;
  params.iterations = iterations;
  /* configure OpenMP before the first parallel region runs */
  omp_set_num_threads(number_of_threads);
  return params;
}
/**
 * Releases all heap memory owned by the simulation data: the per-row force
 * buffers, the buffer's row-pointer array, and the molecules array.
 */
void finalize(SimData *data, SimParams *params) {
  for (uint64_t row = 0; row < params->N; row++) {
    free(data->forces_buffer[row]);
  }
  free(data->forces_buffer);
  free(data->molecules);
}
|
GB_unop__ainv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint8_uint8)
// op(A') function: GB (_unop_tran__ainv_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = -Ax [p] for every entry of A (unsigned negation: the
// result wraps modulo 2^8).  A is either non-bitmap (Ab == NULL, all anz
// values in Ax are entries) or bitmap (Ab [p] != 0 marks a live entry).

GrB_Info GB (_unop_apply__ainv_uint8_uint8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of values in Ax (loop bound)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity operator with no typecast: a bulk copy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,  // used by the shared transpose template
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel, specialized via the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
time_c2h6.c | /*
* ethane molecule
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "cint.h"
void run_all(int *atm, int natm, int *bas, int nbas, double *env);
int cint2e_ip1_sph(double *buf, int *shls,
int *atm, int natm, int *bas, int nbas, double *env,
CINTOpt *opt);
void cint2e_ip1_sph_optimizer(CINTOpt **opt, int *atm, int natm,
int *bas, int nbas, double *env);
int main()
{
int natm = 8;
int nbas = natm*20;
// ATM_SLOTS = 6; BAS_SLOTS = 8;
int *atm = malloc(sizeof(int) * natm * ATM_SLOTS);
int *bas = malloc(sizeof(int) * nbas * BAS_SLOTS);
double *env = malloc(sizeof(double) * 10000);
int i, j, ia, n, off;
off = PTR_ENV_START; // = 20
atm(CHARGE_OF,0)=6; atm(PTR_COORD,0)=off; env[off+0]= 0.000; env[off+1]= 0.000; env[off+2]= 0.769; off+=3;
atm(CHARGE_OF,1)=1; atm(PTR_COORD,1)=off; env[off+0]= 0.000; env[off+1]= 1.014; env[off+2]= 1.174; off+=3;
atm(CHARGE_OF,2)=1; atm(PTR_COORD,2)=off; env[off+0]=-0.878; env[off+1]=-0.507; env[off+2]= 1.174; off+=3;
atm(CHARGE_OF,3)=1; atm(PTR_COORD,3)=off; env[off+0]= 0.878; env[off+1]=-0.507; env[off+2]= 1.174; off+=3;
atm(CHARGE_OF,4)=6; atm(PTR_COORD,4)=off; env[off+0]= 0.000; env[off+1]= 0.000; env[off+2]=-0.769; off+=3;
atm(CHARGE_OF,5)=1; atm(PTR_COORD,5)=off; env[off+0]= 0.000; env[off+1]= 1.014; env[off+2]=-1.174; off+=3;
atm(CHARGE_OF,6)=1; atm(PTR_COORD,6)=off; env[off+0]=-0.878; env[off+1]=-0.507; env[off+2]=-1.174; off+=3;
atm(CHARGE_OF,7)=1; atm(PTR_COORD,7)=off; env[off+0]= 0.878; env[off+1]=-0.507; env[off+2]=-1.174; off+=3;
// 6-31G
env[off+0 ] = 3047.5249; env[off+6 ] = 0.0018347*CINTgto_norm(0,env[off+0 ]);
env[off+1 ] = 457.36951; env[off+7 ] = 0.0140373*CINTgto_norm(0,env[off+1 ]);
env[off+2 ] = 103.94869; env[off+8 ] = 0.0688426*CINTgto_norm(0,env[off+2 ]);
env[off+3 ] = 29.210155; env[off+9 ] = 0.2321844*CINTgto_norm(0,env[off+3 ]);
env[off+4 ] = 9.2866630; env[off+10] = 0.4679413*CINTgto_norm(0,env[off+4 ]);
env[off+5 ] = 3.1639270; env[off+11] = 0.3623120*CINTgto_norm(0,env[off+5 ]);
env[off+12] = 7.8682724; env[off+15] =-0.1193324*CINTgto_norm(0,env[off+12]);
env[off+13] = 1.8812885; env[off+16] =-0.1608542*CINTgto_norm(0,env[off+13]);
env[off+14] = 0.5442493; env[off+17] = 1.1434564*CINTgto_norm(0,env[off+14]);
env[off+18] = 0.1687144; env[off+19] = 1.0000000*CINTgto_norm(0,env[off+18]);
env[off+20] = 7.8682724; env[off+23] = 0.0689991*CINTgto_norm(1,env[off+20]);
env[off+21] = 1.8812885; env[off+24] = 0.3164240*CINTgto_norm(1,env[off+21]);
env[off+22] = 0.5442493; env[off+25] = 0.7443083*CINTgto_norm(1,env[off+22]);
env[off+26] = 0.1687144; env[off+27] = 1.0000000*CINTgto_norm(1,env[off+26]);
env[off+28] = 18.731137; env[off+31] = 0.0334946*CINTgto_norm(0,env[off+28]);
env[off+29] = 2.8253937; env[off+32] = 0.2347269*CINTgto_norm(0,env[off+29]);
env[off+30] = 0.6401217; env[off+33] = 0.8137573*CINTgto_norm(0,env[off+30]);
env[off+34] = 0.1612778; env[off+35] = 1.0000000*CINTgto_norm(0,env[off+34]);
for (i = 0, ia = 0, n = 0; i < 2; i++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 6;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+0;
bas[PTR_COEFF+BAS_SLOTS*n] = off+6;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+12;
bas[PTR_COEFF+BAS_SLOTS*n] = off+15;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+18;
bas[PTR_COEFF+BAS_SLOTS*n] = off+19;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+20;
bas[PTR_COEFF+BAS_SLOTS*n] = off+23;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+26;
bas[PTR_COEFF+BAS_SLOTS*n] = off+27;
n++;
ia++;
for (j = 0; j < 3; j++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+28;
bas[PTR_COEFF+BAS_SLOTS*n] = off+31;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+34;
bas[PTR_COEFF+BAS_SLOTS*n] = off+35;
n++;
ia++;
}
}
nbas = n;
printf("6-31G basis\n");
run_all(atm, natm, bas, nbas, env);
// 6-311G**
env[off+ 0] = 4563.240; env[off+17] = 0.0019666*CINTgto_norm(0,env[off+ 0]);
env[off+ 1] = 682.0240; env[off+18] = 0.0152306*CINTgto_norm(0,env[off+ 1]);
env[off+ 2] = 154.9730; env[off+19] = 0.0761269*CINTgto_norm(0,env[off+ 2]);
env[off+ 3] = 44.45530; env[off+20] = 0.2608010*CINTgto_norm(0,env[off+ 3]);
env[off+ 4] = 13.02900; env[off+21] = 0.6164620*CINTgto_norm(0,env[off+ 4]);
env[off+ 5] = 1.827730; env[off+22] = 0.2210060*CINTgto_norm(0,env[off+ 5]);
env[off+ 6] = 20.96420; env[off+23] = 0.1146600*CINTgto_norm(0,env[off+ 6]);
env[off+ 7] = 4.803310; env[off+24] = 0.9199990*CINTgto_norm(0,env[off+ 7]);
env[off+ 8] = 1.459330; env[off+25] = -0.003030*CINTgto_norm(0,env[off+ 8]);
env[off+ 9] = 0.483456; env[off+26] = 1.0000000*CINTgto_norm(0,env[off+ 9]);
env[off+10] = 0.145585; env[off+27] = 1.0000000*CINTgto_norm(0,env[off+10]);
env[off+11] = 20.96420; env[off+28] = 0.0402487*CINTgto_norm(1,env[off+11]);
env[off+12] = 4.803310; env[off+29] = 0.2375940*CINTgto_norm(1,env[off+12]);
env[off+13] = 1.459330; env[off+30] = 0.8158540*CINTgto_norm(1,env[off+13]);
env[off+14] = 0.483456; env[off+31] = 1.0000000*CINTgto_norm(1,env[off+14]);
env[off+15] = 0.145585; env[off+32] = 1.0000000*CINTgto_norm(1,env[off+15]);
env[off+16] = 0.626000; env[off+33] = 1.0000000*CINTgto_norm(2,env[off+16]);
env[off+34] = 33.86500; env[off+40] = 0.0254938*CINTgto_norm(0,env[off+34]);
env[off+35] = 5.094790; env[off+41] = 0.1903730*CINTgto_norm(0,env[off+35]);
env[off+36] = 1.158790; env[off+42] = 0.8521610*CINTgto_norm(0,env[off+36]);
env[off+37] = 0.325840; env[off+43] = 1.0000000*CINTgto_norm(0,env[off+37]);
env[off+38] = 0.102741; env[off+44] = 1.0000000*CINTgto_norm(0,env[off+38]);
env[off+39] = 0.750000; env[off+45] = 1.0000000*CINTgto_norm(0,env[off+39]);
for (i = 0, ia = 0, n = 0; i < 2; i++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 6;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 0;
bas[PTR_COEFF+BAS_SLOTS*n] = off+17;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 6;
bas[PTR_COEFF+BAS_SLOTS*n] = off+23;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 9;
bas[PTR_COEFF+BAS_SLOTS*n] = off+26;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+10;
bas[PTR_COEFF+BAS_SLOTS*n] = off+27;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+11;
bas[PTR_COEFF+BAS_SLOTS*n] = off+28;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+14;
bas[PTR_COEFF+BAS_SLOTS*n] = off+31;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+15;
bas[PTR_COEFF+BAS_SLOTS*n] = off+32;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+16;
bas[PTR_COEFF+BAS_SLOTS*n] = off+33;
n++;
ia++;
for (j = 0; j < 3; j++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+34;
bas[PTR_COEFF+BAS_SLOTS*n] = off+40;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+37;
bas[PTR_COEFF+BAS_SLOTS*n] = off+43;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+38;
bas[PTR_COEFF+BAS_SLOTS*n] = off+44;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+39;
bas[PTR_COEFF+BAS_SLOTS*n] = off+45;
n++;
ia++;
}
}
nbas = n;
printf("6-311G(dp) basis\n");
run_all(atm, natm, bas, nbas, env);
// cc-pVDZ, C
env[off+ 0] = 6665.0; env[off+ 8]=0.000692*CINTgto_norm(0,env[off+ 0]); env[off+16]=-0.000146*CINTgto_norm(0,env[off+0]);
env[off+ 1] = 1000.0; env[off+ 9]=0.005329*CINTgto_norm(0,env[off+ 1]); env[off+17]=-0.001154*CINTgto_norm(0,env[off+1]);
env[off+ 2] = 228.00; env[off+10]=0.027077*CINTgto_norm(0,env[off+ 2]); env[off+18]=-0.005725*CINTgto_norm(0,env[off+2]);
env[off+ 3] = 64.710; env[off+11]=0.101718*CINTgto_norm(0,env[off+ 3]); env[off+19]=-0.023312*CINTgto_norm(0,env[off+3]);
env[off+ 4] = 21.060; env[off+12]=0.274740*CINTgto_norm(0,env[off+ 4]); env[off+20]=-0.063955*CINTgto_norm(0,env[off+4]);
env[off+ 5] = 7.4950; env[off+13]=0.448564*CINTgto_norm(0,env[off+ 5]); env[off+21]=-0.149981*CINTgto_norm(0,env[off+5]);
env[off+ 6] = 2.7970; env[off+14]=0.285074*CINTgto_norm(0,env[off+ 6]); env[off+22]=-0.127262*CINTgto_norm(0,env[off+6]);
env[off+ 7] = 0.5215; env[off+15]=0.015204*CINTgto_norm(0,env[off+ 7]); env[off+23]= 0.544529*CINTgto_norm(0,env[off+7]);
env[off+24] = 0.1596; env[off+25]=1.000000*CINTgto_norm(0,env[off+24]);
env[off+26] = 9.4390; env[off+29]=0.038109*CINTgto_norm(1,env[off+26]);
env[off+27] = 2.0020; env[off+30]=0.209480*CINTgto_norm(1,env[off+27]);
env[off+28] = 0.5456; env[off+31]=0.508557*CINTgto_norm(1,env[off+28]);
env[off+32] = 0.1517; env[off+33]=1.000000*CINTgto_norm(1,env[off+32]);
env[off+34] = 0.55 ; env[off+35]=1.000000*CINTgto_norm(2,env[off+34]);
// H
env[off+36] = 13.010; env[off+39]=0.019685*CINTgto_norm(0,env[off+36]);
env[off+37] = 1.9620; env[off+40]=0.137977*CINTgto_norm(0,env[off+37]);
env[off+38] = 0.4446; env[off+41]=0.478148*CINTgto_norm(0,env[off+38]);
env[off+42] = 0.1220; env[off+43]=1 *CINTgto_norm(0,env[off+42]);
env[off+44] = 0.7270; env[off+45]=1 *CINTgto_norm(0,env[off+44]);
for (i = 0, ia = 0, n = 0; i < 2; i++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 8;
bas[NCTR_OF +BAS_SLOTS*n] = 2;
bas[PTR_EXP +BAS_SLOTS*n] = off+0;
bas[PTR_COEFF+BAS_SLOTS*n] = off+8;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+24;
bas[PTR_COEFF+BAS_SLOTS*n] = off+25;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+26;
bas[PTR_COEFF+BAS_SLOTS*n] = off+29;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+32;
bas[PTR_COEFF+BAS_SLOTS*n] = off+33;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+34;
bas[PTR_COEFF+BAS_SLOTS*n] = off+35;
n++;
ia++;
for (j = 0; j < 3; j++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+36;
bas[PTR_COEFF+BAS_SLOTS*n] = off+39;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+42;
bas[PTR_COEFF+BAS_SLOTS*n] = off+43;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+44;
bas[PTR_COEFF+BAS_SLOTS*n] = off+45;
n++;
ia++;
}
}
nbas = n;
printf("cc-pVDZ basis\n");
run_all(atm, natm, bas, nbas, env);
// cc-pVTZ
env[off+ 0] = 8236.0; env[off+18]= 0.000531*CINTgto_norm(0,env[off+ 0]); env[off+26]=-0.000113*CINTgto_norm(0,env[off+ 0]);
env[off+ 1] = 1235.0; env[off+19]= 0.004108*CINTgto_norm(0,env[off+ 1]); env[off+27]=-0.000878*CINTgto_norm(0,env[off+ 1]);
env[off+ 2] = 280.80; env[off+20]= 0.021087*CINTgto_norm(0,env[off+ 2]); env[off+28]=-0.004540*CINTgto_norm(0,env[off+ 2]);
env[off+ 3] = 79.270; env[off+21]= 0.081853*CINTgto_norm(0,env[off+ 3]); env[off+29]=-0.018133*CINTgto_norm(0,env[off+ 3]);
env[off+ 4] = 25.590; env[off+22]= 0.234817*CINTgto_norm(0,env[off+ 4]); env[off+30]=-0.055760*CINTgto_norm(0,env[off+ 4]);
env[off+ 5] = 8.9970; env[off+23]= 0.434401*CINTgto_norm(0,env[off+ 5]); env[off+31]=-0.126895*CINTgto_norm(0,env[off+ 5]);
env[off+ 6] = 3.3190; env[off+24]= 0.346129*CINTgto_norm(0,env[off+ 6]); env[off+32]=-0.170352*CINTgto_norm(0,env[off+ 6]);
env[off+ 7] = 0.3643; env[off+25]=-0.008983*CINTgto_norm(0,env[off+ 7]); env[off+33]= 0.598684*CINTgto_norm(0,env[off+ 7]);
env[off+ 8] = 0.9059; env[off+34]= 1.000000*CINTgto_norm(0,env[off+ 8]);
env[off+ 9] = 0.1285; env[off+35]= 1.000000*CINTgto_norm(0,env[off+ 9]);
env[off+10] = 18.710; env[off+36]= 0.014031*CINTgto_norm(1,env[off+10]);
env[off+11] = 4.1330; env[off+37]= 0.086866*CINTgto_norm(1,env[off+11]);
env[off+12] = 1.2000; env[off+38]= 0.290216*CINTgto_norm(1,env[off+12]);
env[off+13] = 0.3827; env[off+39]= 1.000000*CINTgto_norm(1,env[off+13]);
env[off+14] = 0.1209; env[off+40]= 1.000000*CINTgto_norm(1,env[off+14]);
env[off+15] = 1.0970; env[off+41]= 1.000000*CINTgto_norm(2,env[off+15]);
env[off+16] = 0.3180; env[off+42]= 1.000000*CINTgto_norm(2,env[off+16]);
env[off+17] = 0.7610; env[off+43]= 1.000000*CINTgto_norm(3,env[off+17]);
env[off+44] = 33.870; env[off+52]= 0.006068*CINTgto_norm(0,env[off+44]);
env[off+45] = 5.0950; env[off+53]= 0.045308*CINTgto_norm(0,env[off+45]);
env[off+46] = 1.1590; env[off+54]= 0.202822*CINTgto_norm(0,env[off+46]);
env[off+47] = 0.3258; env[off+55]= 1.000000*CINTgto_norm(0,env[off+47]);
env[off+48] = 0.1027; env[off+56]= 1.000000*CINTgto_norm(0,env[off+48]);
env[off+49] = 1.4070; env[off+57]= 1.000000*CINTgto_norm(1,env[off+49]);
env[off+50] = 0.3880; env[off+58]= 1.000000*CINTgto_norm(1,env[off+50]);
env[off+51] = 1.0570; env[off+59]= 1.000000*CINTgto_norm(2,env[off+51]);
for (i = 0, ia = 0, n = 0; i < 2; i++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 8;
bas[NCTR_OF +BAS_SLOTS*n] = 2;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 0;
bas[PTR_COEFF+BAS_SLOTS*n] = off+18;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 8;
bas[PTR_COEFF+BAS_SLOTS*n] = off+34;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 9;
bas[PTR_COEFF+BAS_SLOTS*n] = off+35;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+10;
bas[PTR_COEFF+BAS_SLOTS*n] = off+38;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+13;
bas[PTR_COEFF+BAS_SLOTS*n] = off+39;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+14;
bas[PTR_COEFF+BAS_SLOTS*n] = off+40;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+15;
bas[PTR_COEFF+BAS_SLOTS*n] = off+41;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+16;
bas[PTR_COEFF+BAS_SLOTS*n] = off+42;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 3;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+17;
bas[PTR_COEFF+BAS_SLOTS*n] = off+43;
n++;
ia++;
for (j = 0; j < 3; j++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+44;
bas[PTR_COEFF+BAS_SLOTS*n] = off+52;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+47;
bas[PTR_COEFF+BAS_SLOTS*n] = off+55;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+48;
bas[PTR_COEFF+BAS_SLOTS*n] = off+56;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+49;
bas[PTR_COEFF+BAS_SLOTS*n] = off+57;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+50;
bas[PTR_COEFF+BAS_SLOTS*n] = off+58;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+51;
bas[PTR_COEFF+BAS_SLOTS*n] = off+59;
n++;
ia++;
}
}
nbas = n;
printf("cc-pVTZ basis\n");
run_all(atm, natm, bas, nbas, env);
env[off+ 0] = 33980.; env[off+24]= 0.000091*CINTgto_norm(0,env[off+ 0]); env[off+33]= -0.000019*CINTgto_norm(0,env[off+0]);
env[off+ 1] = 5089.0; env[off+25]= 0.000704*CINTgto_norm(0,env[off+ 1]); env[off+34]= -0.000151*CINTgto_norm(0,env[off+1]);
env[off+ 2] = 1157.0; env[off+26]= 0.003693*CINTgto_norm(0,env[off+ 2]); env[off+35]= -0.000785*CINTgto_norm(0,env[off+2]);
env[off+ 3] = 326.60; env[off+27]= 0.015360*CINTgto_norm(0,env[off+ 3]); env[off+36]= -0.003324*CINTgto_norm(0,env[off+3]);
env[off+ 4] = 106.10; env[off+28]= 0.052929*CINTgto_norm(0,env[off+ 4]); env[off+37]= -0.011512*CINTgto_norm(0,env[off+4]);
env[off+ 5] = 38.110; env[off+29]= 0.147043*CINTgto_norm(0,env[off+ 5]); env[off+38]= -0.034160*CINTgto_norm(0,env[off+5]);
env[off+ 6] = 14.750; env[off+30]= 0.305631*CINTgto_norm(0,env[off+ 6]); env[off+39]= -0.077173*CINTgto_norm(0,env[off+6]);
env[off+ 7] = 6.0350; env[off+31]= 0.399345*CINTgto_norm(0,env[off+ 7]); env[off+40]= -0.141493*CINTgto_norm(0,env[off+7]);
env[off+ 8] = 2.5300; env[off+32]= 0.217051*CINTgto_norm(0,env[off+ 8]); env[off+41]= -0.118019*CINTgto_norm(0,env[off+8]);
env[off+ 9] = 0.7355; env[off+42]= 1.000000*CINTgto_norm(0,env[off+ 9]);
env[off+10] = 0.2905; env[off+43]= 1.000000*CINTgto_norm(0,env[off+10]);
env[off+11] = 0.1111; env[off+44]= 1.000000*CINTgto_norm(0,env[off+11]);
env[off+12] = 34.510; env[off+45]= 0.005378*CINTgto_norm(1,env[off+12]);
env[off+13] = 7.9150; env[off+46]= 0.036132*CINTgto_norm(1,env[off+13]);
env[off+14] = 2.3680; env[off+47]= 0.142493*CINTgto_norm(1,env[off+14]);
env[off+15] = 0.8132; env[off+48]= 1.000000*CINTgto_norm(1,env[off+15]);
env[off+16] = 0.2890; env[off+49]= 1.000000*CINTgto_norm(1,env[off+16]);
env[off+17] = 0.1007; env[off+50]= 1.000000*CINTgto_norm(1,env[off+17]);
env[off+18] = 1.8480; env[off+51]= 1.000000*CINTgto_norm(2,env[off+18]);
env[off+19] = 0.6490; env[off+52]= 1.000000*CINTgto_norm(2,env[off+19]);
env[off+20] = 0.2280; env[off+53]= 1.000000*CINTgto_norm(2,env[off+20]);
env[off+21] = 1.4190; env[off+54]= 1.000000*CINTgto_norm(3,env[off+21]);
env[off+22] = 0.4850; env[off+55]= 1.000000*CINTgto_norm(3,env[off+22]);
env[off+23] = 1.0110; env[off+56]= 1.000000*CINTgto_norm(4,env[off+23]);
env[off+57] = 82.64; env[off+69] = 0.002006*CINTgto_norm(0,env[off+57]);
env[off+58] = 12.41; env[off+70] = 0.015343*CINTgto_norm(0,env[off+58]);
env[off+59] = 2.824; env[off+71] = 0.075579*CINTgto_norm(0,env[off+59]);
env[off+60] = 0.797; env[off+72] = 1.000000*CINTgto_norm(0,env[off+60]);
env[off+61] = 0.258; env[off+73] = 1.000000*CINTgto_norm(0,env[off+61]);
env[off+62] = 0.089; env[off+74] = 1.000000*CINTgto_norm(0,env[off+62]);
env[off+63] = 2.292; env[off+75] = 1.000000*CINTgto_norm(1,env[off+63]);
env[off+64] = 0.838; env[off+76] = 1.000000*CINTgto_norm(1,env[off+64]);
env[off+65] = 0.292; env[off+77] = 1.000000*CINTgto_norm(1,env[off+65]);
env[off+66] = 2.062; env[off+78] = 1.000000*CINTgto_norm(2,env[off+66]);
env[off+67] = 0.662; env[off+79] = 1.000000*CINTgto_norm(2,env[off+67]);
env[off+68] = 1.397; env[off+80] = 1.000000*CINTgto_norm(3,env[off+68]);
for (i = 0, ia = 0, n = 0; i < 2; i++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 8;
bas[NCTR_OF +BAS_SLOTS*n] = 2;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 0;
bas[PTR_COEFF+BAS_SLOTS*n] = off+24;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+ 9;
bas[PTR_COEFF+BAS_SLOTS*n] = off+42;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+10;
bas[PTR_COEFF+BAS_SLOTS*n] = off+43;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+11;
bas[PTR_COEFF+BAS_SLOTS*n] = off+44;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+12;
bas[PTR_COEFF+BAS_SLOTS*n] = off+45;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+15;
bas[PTR_COEFF+BAS_SLOTS*n] = off+48;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+16;
bas[PTR_COEFF+BAS_SLOTS*n] = off+49;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+17;
bas[PTR_COEFF+BAS_SLOTS*n] = off+50;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+18;
bas[PTR_COEFF+BAS_SLOTS*n] = off+51;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+19;
bas[PTR_COEFF+BAS_SLOTS*n] = off+52;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+20;
bas[PTR_COEFF+BAS_SLOTS*n] = off+53;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 3;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+21;
bas[PTR_COEFF+BAS_SLOTS*n] = off+54;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 3;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+22;
bas[PTR_COEFF+BAS_SLOTS*n] = off+55;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 4;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+23;
bas[PTR_COEFF+BAS_SLOTS*n] = off+56;
n++;
ia++;
for (j = 0; j < 3; j++) {
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 3;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+57;
bas[PTR_COEFF+BAS_SLOTS*n] = off+69;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+60;
bas[PTR_COEFF+BAS_SLOTS*n] = off+72;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+61;
bas[PTR_COEFF+BAS_SLOTS*n] = off+73;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 0;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+62;
bas[PTR_COEFF+BAS_SLOTS*n] = off+74;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+63;
bas[PTR_COEFF+BAS_SLOTS*n] = off+75;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+64;
bas[PTR_COEFF+BAS_SLOTS*n] = off+76;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 1;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+65;
bas[PTR_COEFF+BAS_SLOTS*n] = off+77;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+66;
bas[PTR_COEFF+BAS_SLOTS*n] = off+78;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 2;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+67;
bas[PTR_COEFF+BAS_SLOTS*n] = off+79;
n++;
bas[ATOM_OF +BAS_SLOTS*n] = ia;
bas[ANG_OF +BAS_SLOTS*n] = 3;
bas[NPRIM_OF +BAS_SLOTS*n] = 1;
bas[NCTR_OF +BAS_SLOTS*n] = 1;
bas[PTR_EXP +BAS_SLOTS*n] = off+68;
bas[PTR_COEFF+BAS_SLOTS*n] = off+80;
n++;
ia++;
}
}
nbas = n;
printf("cc-pVQZ basis\n");
run_all(atm, natm, bas, nbas, env);
free(atm);
free(bas);
free(env);
}
/*
 * run_all: benchmark libcint two-electron integrals over all shell quartets.
 *
 * Runs three passes over the basis described by (atm, natm, bas, nbas, env),
 * all following the libcint calling convention:
 *   1. plain ERIs (cint2e_sph) without an optimizer,
 *   2. plain ERIs with a cint2e_sph optimizer,
 *   3. gradient integrals (cint2e_ip1_sph) with their optimizer,
 * printing a progress meter and final Mflops-style throughput to stdout.
 * No return value; on allocation failure it reports and returns early.
 */
void run_all(int *atm, int natm, int *bas, int nbas, double *env)
{
	int i, j, k, l, ij, kl;
	int di, dj, dk, dl;
	int kl_max;
	int shls[4];
	double *buf;
	/* Triangular (i >= j) shell-pair lookup tables.  nbas*nbas entries
	 * are allocated although only nbas*(nbas+1)/2 are used, matching the
	 * historical layout. */
	int *ishls = malloc(sizeof(int)*nbas*nbas);
	int *jshls = malloc(sizeof(int)*nbas*nbas);
	if (ishls == NULL || jshls == NULL) {
		fprintf(stderr, "run_all: out of memory\n");
		free(ishls);
		free(jshls);
		return;
	}
	for (i = 0, ij = 0; i < nbas; i++) {
		for (j = 0; j <= i; j++, ij++) {
			ishls[ij] = i;
			jshls[ij] = j;
		}
	}

	int ncgto = CINTtot_cgto_spheric(bas, nbas);
	printf("\tshells = %d, total cGTO = %d, total pGTO = %d\n",
	       nbas, ncgto,
	       CINTtot_pgto_spheric(bas, nbas));

	int pct, count;
	double time0, time1 = 0;
	double tt, tot;
	/* ~n^4/8 unique integrals thanks to 8-fold permutational symmetry */
	tot = (double)ncgto*ncgto*ncgto*ncgto/8;
	time0 = omp_get_wtime();

	CINTOpt *non_opt = NULL;
	CINTOpt *opt_for_cint2e = NULL;
	cint2e_sph_optimizer(&opt_for_cint2e, atm, natm, bas, nbas, env);
	CINTOpt *opt_for_ip1 = NULL;
	cint2e_ip1_sph_optimizer(&opt_for_ip1, atm, natm, bas, nbas, env);

	printf("\tcint2e_sph without optimizer: total num ERI = %.2e\n", tot);
	pct = 0; count = 0;
#pragma omp parallel default(none) \
	shared(atm, natm, bas, nbas, env, ishls, jshls, non_opt, time0, pct, count, stdout) \
	private(di, dj, dk, dl, i, j, k, l, ij, kl, kl_max, shls, buf, time1)
#pragma omp for nowait schedule(dynamic, 2)
	for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
		i = ishls[ij];
		j = jshls[ij];
		di = CINTcgto_spheric(i, bas);
		dj = CINTcgto_spheric(j, bas);
		/* when ksh==ish, there exists k<i, so it's possible kl>ij */
		kl_max = (i+1)*(i+2)/2;
		for (kl = 0; kl < kl_max; kl++) {
			k = ishls[kl];
			l = jshls[kl];
			dk = CINTcgto_spheric(k, bas);
			dl = CINTcgto_spheric(l, bas);
			shls[0] = i;
			shls[1] = j;
			shls[2] = k;
			shls[3] = l;
			buf = malloc(sizeof(double) * di*dj*dk*dl);
			if (buf != NULL) {
				cint2e_sph(buf, shls, atm, natm, bas, nbas, env, non_opt);
				free(buf);
			}
		}
		/* pct and count are shared: serialize the read-modify-write
		 * (this was previously an unprotected data race) */
#pragma omp critical
		{
			count += kl_max;
			if (100*count/((long)nbas*nbas*(nbas+1)*(nbas+2)/8) > pct) {
				pct++;
				time1 = omp_get_wtime();
				printf("\t%d%%, CPU time = %8.2f\r", pct, time1-time0);
				fflush(stdout);
			}
		}
	}
	time1 = omp_get_wtime();
	tt = time1-time0;
	printf("\t100%%, CPU time = %8.2f, %8.4f Mflops\n",
	       tt, tot/1e6/tt);

	/* pass 2: same quartet loop, this time with the optimizer */
	time0 = time1;
	printf("\tcint2e_sph with optimizer: total num ERI = %.2e\n", tot);
	pct = 0; count = 0;
#pragma omp parallel default(none) \
	shared(atm, natm, bas, nbas, env, ishls, jshls, opt_for_cint2e, time0, pct, count, stdout) \
	private(di, dj, dk, dl, i, j, k, l, ij, kl, kl_max, shls, buf, time1)
#pragma omp for nowait schedule(dynamic, 2)
	for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
		i = ishls[ij];
		j = jshls[ij];
		di = CINTcgto_spheric(i, bas);
		dj = CINTcgto_spheric(j, bas);
		/* when ksh==ish, there exists k<i, so it's possible kl>ij */
		kl_max = (i+1)*(i+2)/2;
		for (kl = 0; kl < kl_max; kl++) {
			k = ishls[kl];
			l = jshls[kl];
			dk = CINTcgto_spheric(k, bas);
			dl = CINTcgto_spheric(l, bas);
			shls[0] = i;
			shls[1] = j;
			shls[2] = k;
			shls[3] = l;
			buf = malloc(sizeof(double) * di*dj*dk*dl);
			if (buf != NULL) {
				cint2e_sph(buf, shls, atm, natm, bas, nbas, env, opt_for_cint2e);
				free(buf);
			}
		}
#pragma omp critical
		{
			count += kl_max;
			if (100*count/((long)nbas*nbas*(nbas+1)*(nbas+2)/8) > pct) {
				pct++;
				time1 = omp_get_wtime();
				printf("\t%d%%, CPU time = %8.2f\r", pct, time1-time0);
				fflush(stdout);
			}
		}
	}
	time1 = omp_get_wtime();
	tt = time1-time0;
	printf("\t100%%, CPU time = %8.2f, %8.4f Mflops\n",
	       tt, tot/1e6/tt);

	/* pass 3: gradient integrals; (ij) runs over the full square since
	 * cint2e_ip1 differentiates bra side only; 3 components per ERI */
	time0 = time1;
	tot = (double)ncgto*ncgto*ncgto*ncgto/2*3;
	printf("\tGradients with optimizer: total num ERI = %.2e\n", tot);
	pct = 0; count = 0;
#pragma omp parallel default(none) \
	shared(atm, natm, bas, nbas, env, ishls, jshls, opt_for_ip1, time0, pct, count, stdout) \
	private(di, dj, dk, dl, i, j, k, l, ij, kl, shls, buf, time1)
#pragma omp for nowait schedule(dynamic, 2)
	for (ij = 0; ij < nbas*nbas; ij++) {
		i = ij / nbas;
		j = ij - nbas*i;
		di = CINTcgto_spheric(i, bas);
		dj = CINTcgto_spheric(j, bas);
		for (kl = 0; kl < nbas*(nbas+1)/2; kl++) {
			k = ishls[kl];
			l = jshls[kl];
			dk = CINTcgto_spheric(k, bas);
			dl = CINTcgto_spheric(l, bas);
			shls[0] = i;
			shls[1] = j;
			shls[2] = k;
			shls[3] = l;
			buf = malloc(sizeof(double) * di*dj*dk*dl*3);
			if (buf != NULL) {
				cint2e_ip1_sph(buf, shls, atm, natm, bas, nbas, env, opt_for_ip1);
				free(buf);
			}
		}
#pragma omp critical
		{
			count += nbas*(nbas+1)/2;
			if (100*count/((long)nbas*nbas*nbas*(nbas+1)/2) > pct) {
				pct++;
				time1 = omp_get_wtime();
				printf("\t%d%%, CPU time = %8.2f\r", pct,
				       time1-time0);
				fflush(stdout);
			}
		}
	}
	time1 = omp_get_wtime();
	tt = time1-time0;
	printf("\t100%%, CPU time = %8.2f, %8.4f Mflops\n", tt, tot/1e6/tt);

	CINTdel_optimizer(&opt_for_cint2e);
	CINTdel_optimizer(&opt_for_ip1);
	free(ishls);
	free(jshls);
}
|
opencl_tc_fmt_plug.c | /*
* TrueCrypt volume OpenCL support to John The Ripper (RIPEMD-160 only)
*
* Based on CPU format originally written by Alain Espinosa <alainesp at
* gmail.com> in 2012.
* Copyright (c) 2015, magnum
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if HAVE_OPENCL
#define FMT_STRUCT fmt_ocl_tc
#if FMT_EXTERNS_H
extern struct fmt_main FMT_STRUCT;
#elif FMT_REGISTERS_H
john_register_one(&FMT_STRUCT);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "options.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#include "aes.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "common-opencl.h"
#define FORMAT_LABEL "truecrypt-opencl"
#define FORMAT_NAME "TrueCrypt AES256_XTS"
#define ALGORITHM_NAME "RIPEMD160 OpenCL"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)
#define IS_RIPEMD160 2
#define MAX_PASSSZ 64
#define PASS_BUFSZ 256
#define KPOOL_SZ 64
#define MAX_KFILE_SZ 1048576 /* 1 MB */
#define MAX_KEYFILES 256
static unsigned char (*first_block_dec)[16];
unsigned char (*keyfiles_data)[MAX_KFILE_SZ];
int (*keyfiles_length);
#define KEYLEN PLAINTEXT_LENGTH
#define OUTLEN 64
#define SALTLEN 64
/* One password candidate as uploaded to the OpenCL kernel. */
typedef struct {
	unsigned int length;     /* number of valid bytes in v */
	unsigned char v[KEYLEN]; /* raw password bytes (KEYLEN == PLAINTEXT_LENGTH) */
} pbkdf2_password;

/* PBKDF2 output slot read back from the device, OUTLEN bytes packed
 * into 32-bit words. */
typedef struct {
	unsigned int v[(OUTLEN+3)/4];
} pbkdf2_hash;

/* Salt as uploaded to the kernel (SALTLEN == 64 bytes). */
typedef struct {
	unsigned char salt[SALTLEN];
} pbkdf2_salt;

/* Host-side salt record built from one ciphertext: the 512-byte volume
 * header split into its 64-byte salt and the remaining encrypted part,
 * plus derivation parameters.  psalt points at the currently set salt. */
struct cust_salt {
	unsigned char salt[64];     /* first 64 header bytes, fed to PBKDF2 */
	unsigned char bin[512-64];  /* remaining (encrypted) header bytes */
	int loop_inc;               /* NOTE(review): per-loop iteration step — confirm against CPU format */
	int num_iterations;         /* PBKDF2 iteration count */
	int hash_type;              /* IS_RIPEMD160 for this format */
	int nkeyfiles;              /* number of attached keyfiles (0 if none) */
} *psalt;
/* Self-test vectors: each entry is the format tag followed by a full
 * 512-byte TrueCrypt volume header as 1024 hex characters, paired with
 * the password that unlocks it. */
static struct fmt_tests tests_ripemd160[] = {
	{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
	{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
	{NULL}
};
static cl_int cl_error;
static pbkdf2_password *inbuffer;
static pbkdf2_hash *outbuffer;
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Report the largest usable work-group size for crypt_kernel to the
 * autotuner (no local-memory reservation, FALSE/0 arguments). */
static size_t get_task_max_work_group_size()
{
	size_t limit;

	limit = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	return limit;
}
/* Allocate host and device buffers sized for gws work items and bind
 * the three kernel arguments.  Called by the autotuner for each global
 * work size it tries; release_clobj() is the matching teardown. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(pbkdf2_password) * gws;
	outsize = sizeof(pbkdf2_hash) * gws;
	settingsize = sizeof(pbkdf2_salt);
	/* host mirrors; inbuffer zeroed so unset candidate slots are defined */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	/// Allocate memory
	mem_in =
	clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");
	/* kernel args: 0 = passwords in, 1 = hashes out, 2 = salt/settings */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
	&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
	&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
	&mem_setting), "Error while setting mem_salt kernel argument");
	/* host-side scratch: decrypted first blocks and keyfile storage */
	first_block_dec = mem_calloc(gws, sizeof(*first_block_dec));
	keyfiles_data = mem_calloc(MAX_KEYFILES, sizeof(*keyfiles_data));
	keyfiles_length = mem_calloc(MAX_KEYFILES, sizeof(int));
}
/* Release every device buffer and host allocation made by create_clobj(). */
static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");

	MEM_FREE(keyfiles_length);
	MEM_FREE(keyfiles_data);
	MEM_FREE(first_block_dec);
	MEM_FREE(outbuffer);
	MEM_FREE(inbuffer);
}
/* Final teardown: drop buffers, kernel and program if autotuning ran. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* One-time format init: remember our fmt_main and prepare the GPU device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * Lazily build the PBKDF2-RIPEMD160 kernel and run the auto-tuner the first
 * time a password database is attached.  Buffer element sizes are baked into
 * the kernel source through -D build options.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
			"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
			(int)sizeof(inbuffer->v),
			(int)sizeof(currentsalt.salt),
			(int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_ripemd160_kernel.cl",
			gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "pbkdf2_ripemd160",
			&cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
			self, create_clobj, release_clobj,
			sizeof(pbkdf2_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/*
 * Accept ciphertexts of the form TAG + 1024 hex digits, optionally followed
 * by "$<nkeyfiles>$file1$file2...".  Returns 1 when the input is well formed.
 */
static int valid(char* ciphertext, struct fmt_main *self)
{
	char *body, *dollar;
	unsigned int pos;
	int file_count;

	if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN))
		return 0;
	body = ciphertext + TAG_RIPEMD160_LEN;

	dollar = strchr(body, '$');
	if (dollar == NULL) {
		/* No keyfile list: the remainder must be exactly the hex blob. */
		if (strlen(body) != 512 * 2)
			return 0;
	} else {
		if (dollar - body != 512 * 2)
			return 0;
		/* check keyfile(s) */
		file_count = atoi(dollar + 1);
		if (file_count < 1 || file_count > MAX_KEYFILES)
			return 0;
	}

	for (pos = 0; pos < 512 * 2; pos++)
		if (atoi16l[ARCH_INDEX(body[pos])] == 0x7F)
			return 0;

	return 1;
}
/*
 * Install the active salt: keep a pointer to the full cust_salt record and
 * copy its 64-byte PBKDF2 salt into the GPU-side settings buffer.
 * FIX: the address-of expression had been mangled to the HTML entity
 * "&curren" + "tsalt" (rendered as a currency sign); restored `&currentsalt`.
 */
static void set_salt(void *salt)
{
	psalt = salt;
	memcpy((char*)currentsalt.salt, psalt->salt, SALTLEN);

	/* Non-blocking write; the queue is drained before the kernel runs. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}
/*
 * Parse a valid() ciphertext into a cust_salt: 64 salt bytes, 448 header
 * bytes, and (optionally) load the listed keyfiles into keyfiles_data[].
 * Returns a pointer into a static, 4-byte-aligned buffer.
 *
 * Fixes over the original:
 *  - keyfile paths are copied with an explicit clamped length so tpath is
 *    always NUL-terminated and never overrun (strncpy(tpath, p, q-p) could
 *    overflow when a path exceeded PATH_BUFFER_SIZE, and
 *    strncpy(tpath, p, sizeof(tpath)) could leave it unterminated);
 *  - ftell() is checked before use (it returns -1 on failure);
 *  - the fopen error message prints the NUL-terminated tpath, not the
 *    unterminated remainder of the ciphertext.
 */
static void* get_salt(char *ciphertext)
{
	static char buf[sizeof(struct cust_salt)+4];
	struct cust_salt *s = (struct cust_salt*)mem_align(buf, 4);
	unsigned int i;
	char tpath[PATH_BUFFER_SIZE] = { 0 };
	char *p, *q;
	int idx;
	FILE *fp;
	size_t len;
	long sz;

	memset(s, 0, sizeof(struct cust_salt));
	s->loop_inc = 1;
	ciphertext += TAG_RIPEMD160_LEN;
	s->hash_type = IS_RIPEMD160;
	s->num_iterations = 2000;

	/* First 64 hex pairs are the salt, the remaining 448 the encrypted
	   volume header. */
	for (i = 0; i < 64; i++)
		s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2*i+1])];
	for (; i < 512; i++)
		s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2*i+1])];

	p = ciphertext;
	q = strchr(p, '$');
	if (!q) /* no keyfiles */
		return s;

	/* process keyfile(s) */
	p = q + 1;
	s->nkeyfiles = atoi(p);

	for (idx = 0; idx < s->nkeyfiles; idx++) {
		p = strchr(p, '$') + 1; /* at first filename */
		q = strchr(p, '$');

		/* Copy the path with a clamped length: always NUL-terminated,
		   never writing past tpath. */
		len = q ? (size_t)(q - p) : strlen(p); /* q == NULL: last file */
		if (len > sizeof(tpath) - 1)
			len = sizeof(tpath) - 1;
		memset(tpath, 0, sizeof(tpath));
		memcpy(tpath, p, len);

		/* read this into keyfiles_data[idx] */
		fp = fopen(tpath, "rb");
		if (!fp)
			pexit("fopen %s", tpath);
		if (fseek(fp, 0L, SEEK_END) == -1)
			pexit("fseek");
		sz = ftell(fp);
		if (sz < 0)
			pexit("ftell");
		if (fseek(fp, 0L, SEEK_SET) == -1)
			pexit("fseek");
		/* NOTE(review): assumes each keyfile fits the fixed
		   keyfiles_data[idx] buffer -- confirm against its declaration. */
		if (fread(keyfiles_data[idx], 1, (size_t)sz, fp) != (size_t)sz)
			pexit("fread");
		keyfiles_length[idx] = (int)sz;
		fclose(fp);
	}

	return s;
}
/*
 * Decrypt `len` bytes of `data` into `out` using AES-256 in XTS mode with
 * sector number 0.  `double_key` holds two concatenated 256-bit keys:
 * bytes 0-31 decrypt the data units, bytes 32-63 encrypt the tweak.
 * NOTE(review): assumes len is a non-zero multiple of 16 (callers pass
 * 16 and 512-64); the loop structure would misbehave for len < 16.
 */
static void AES_256_XTS_first_sector(const unsigned char *double_key,
	unsigned char *out,
	const unsigned char *data,
	unsigned len) {
	unsigned char tweak[16] = { 0 };
	unsigned char buf[16];
	int i, j, cnt;
	AES_KEY key1, key2;

	AES_set_decrypt_key(double_key, 256, &key1);
	AES_set_encrypt_key(&double_key[32], 256, &key2);

	// first aes tweak (we do it right over tweak)
	AES_encrypt(tweak, tweak, &key2);

	cnt = len/16;
	for (j=0;;) {
		/* out = D(key1, data XOR tweak) XOR tweak */
		for (i = 0; i < 16; ++i) buf[i] = data[i]^tweak[i];
		AES_decrypt(buf, out, &key1);
		for (i = 0; i < 16; ++i) out[i]^=tweak[i];
		++j;
		if (j == cnt)
			break;
		else {
			/* Advance the tweak: multiply by x in GF(2^128)
			   (left shift with carry, conditional feedback 0x87). */
			unsigned char Cin, Cout;
			unsigned x;
			Cin = 0;
			for (x = 0; x < 16; ++x) {
				Cout = (tweak[x] >> 7) & 1;
				tweak[x] = ((tweak[x] << 1) + Cin) & 0xFF;
				Cin = Cout;
			}
			if (Cout)
				tweak[0] ^= 135; //GF_128_FDBK;
		}
		data += 16;
		out += 16;
	}
}
/*
 * Mix the loaded keyfiles into the passphrase the TrueCrypt way: for each
 * keyfile byte, update a running CRC-32 and add its four big-endian state
 * bytes into a KPOOL_SZ-byte pool (wrapping around), then add the pool to
 * the zero-padded password in place.  Returns 0.
 */
static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles)
{
	int pl, k;
	unsigned char *kpool;
	unsigned char *kdata;
	int kpool_idx;
	size_t i, kdata_sz;
	uint32_t crc;

	if (pass_memsz < MAX_PASSSZ) {
		error();
	}

	/* Zero-pad the password out to MAX_PASSSZ before mixing. */
	pl = strlen((char*)pass);
	memset(pass+pl, 0, MAX_PASSSZ-pl);

	if ((kpool = mem_calloc(1, KPOOL_SZ)) == NULL) {
		error();
	}

	for (k = 0; k < nkeyfiles; k++) {
		kpool_idx = 0;
		kdata_sz = keyfiles_length[k];
		kdata = keyfiles_data[k];
		crc = ~0U;

		for (i = 0; i < kdata_sz; i++) {
			/* Fold the big-endian CRC state into four pool bytes. */
			crc = jtr_crc32(crc, kdata[i]);
			kpool[kpool_idx++] += (unsigned char)(crc >> 24);
			kpool[kpool_idx++] += (unsigned char)(crc >> 16);
			kpool[kpool_idx++] += (unsigned char)(crc >> 8);
			kpool[kpool_idx++] += (unsigned char)(crc);

			/* Wrap around */
			if (kpool_idx == KPOOL_SZ)
				kpool_idx = 0;
		}
	}

	/* Apply keyfile pool to passphrase */
	for (i = 0; i < KPOOL_SZ; i++)
		pass[i] += kpool[i];

	MEM_FREE(kpool);

	return 0;
}
/*
 * Derive PBKDF2-RIPEMD160 keys for `count` candidates on the GPU, then
 * decrypt the first 16 header bytes per candidate on the CPU so the quick
 * cmp_all/cmp_one checks can look for the "TRUE" magic.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	const int count = *pcount;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Keyfiles modify the stored candidate passwords in place. */
	if (psalt->nkeyfiles) {
#if _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			apply_keyfiles(inbuffer[i].v, 64, psalt->nkeyfiles);
			inbuffer[i].length = 64;
		}
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (the blocking read also drains the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#if _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < count; i++) {
		AES_256_XTS_first_sector((unsigned char*)outbuffer[i].v, first_block_dec[i], psalt->bin, 16);
	}

	return count;
}
/* Quick check: did any candidate's decrypted first block start with "TRUE"? */
static int cmp_all(void* binary, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		if (memcmp(first_block_dec[idx], "TRUE", 4) == 0)
			return 1;
	return 0;
}
/* Quick check for one candidate: decrypted first block begins with "TRUE". */
static int cmp_one(void* binary, int index)
{
	return memcmp(first_block_dec[index], "TRUE", 4) == 0;
}
/* Compare a serialized big-endian CRC-32 against a computed value. */
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
	unsigned char expect[4];

	expect[0] = (comp_crc32 >> 24) & 0xFF;
	expect[1] = (comp_crc32 >> 16) & 0xFF;
	expect[2] = (comp_crc32 >> 8) & 0xFF;
	expect[3] = comp_crc32 & 0xFF;
	return memcmp(given_crc32, expect, 4) == 0;
}
/*
 * Full verification on the CPU: re-derive the key, decrypt the entire
 * 448-byte header, and check the "TRUE" magic plus both embedded CRC-32s
 * exactly as a real TrueCrypt header open would.
 */
static int cmp_exact(char *source, int idx)
{
	unsigned char key[64];
	unsigned char decr_header[512-64];
	CRC32_t check_sum;
	int ksz = inbuffer[idx].length;

	memcpy(key, inbuffer[idx].v, inbuffer[idx].length);

	/* process keyfile(s) */
	if (psalt->nkeyfiles) {
		apply_keyfiles(key, 64, psalt->nkeyfiles);
		ksz = 64;
	}

	pbkdf2_ripemd160(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	AES_256_XTS_first_sector(key, decr_header, psalt->bin, 512-64);

	if (memcmp(decr_header, "TRUE", 4))
		return 0;

	/* CRC of the second half of the decrypted header, stored at offset 8. */
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, &decr_header[256-64], 256);
	if (!cmp_crc32s(&decr_header[8], ~check_sum))
		return 0;

	/* CRC of the leading header fields, stored immediately after them. */
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, decr_header, 256-64-4);
	if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
		return 0;

	return 1;
}
#undef set_key
/* Store one candidate password, truncated to PLAINTEXT_LENGTH bytes. */
static void set_key(char *key, int index)
{
	uint8_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(inbuffer[index].v, key, len);
	inbuffer[index].length = len;
}
/* Return the stored candidate as a NUL-terminated string (static buffer). */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	uint8_t len = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, len);
	out[len] = 0;
	return out;
}
/* Hash the 64 salt bytes (base-11 polynomial) into a salt-table bucket. */
static int salt_hash(void *salt)
{
	struct cust_salt *cs = (struct cust_salt*)salt;
	unsigned int hash = 0;
	unsigned int i;

	for (i = 0; i < 64; i++)
		hash = hash * 11 + cs->salt[i];
	return hash & (SALT_HASH_SIZE - 1);
}
/*
 * Format descriptor: the parameters block, then the method table.
 * NOTE(review): positional initializers -- the order must match
 * struct fmt_main exactly; confirm against formats.h before reordering.
 */
struct fmt_main FMT_STRUCT = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests_ripemd160
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,	/* no stored binary: cmp_* inspect the decrypted header */
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
daxpy_openmp_lock.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define VECTOR_SIZE 100
/* No-op placeholders: skip() models busy-waiting, work() the locked work. */
void skip(int i) { (void)i; }
void work(int i) { (void)i; }
/*
 * OpenMP lock demo around a DAXPY-style computation: the first parallel
 * loop fills X, A, B; the second computes C = alpha*A - B.  A single lock
 * serializes the printf sections and demonstrates omp_test_lock spinning.
 *
 * Fixes over the original: malloc results are checked before use (NULL
 * dereference otherwise), the unnecessary malloc casts are gone, and all
 * four buffers are freed (they were leaked).
 */
int main( int argc, char* argv[] ){
	int i, id;
	double alpha = 1.0;

	// Number of bytes to allocate for N doubles
	size_t bytes = VECTOR_SIZE * sizeof(double);

	// Allocate memory for arrays X, A, B, and C on host
	double *X = malloc(bytes);
	double *A = malloc(bytes);
	double *B = malloc(bytes);
	double *C = malloc(bytes);

	/* Fail cleanly instead of dereferencing NULL in the loops below. */
	if (X == NULL || A == NULL || B == NULL || C == NULL) {
		fprintf(stderr, "allocation of %zu bytes failed\n", bytes);
		free(X);
		free(A);
		free(B);
		free(C);
		return EXIT_FAILURE;
	}

	omp_lock_t lck;
	omp_init_lock(&lck);

#pragma omp parallel for shared(X,A,B,lck) private(i,id)
	for (i = 0; i < VECTOR_SIZE; i++) {
		id = omp_get_thread_num();

		omp_set_lock(&lck);
		/* only one thread at a time can execute this printf */
		printf("My thread id is %d working on i=%d\n", id, i);
		omp_unset_lock(&lck);

		while (!omp_test_lock(&lck)) {
			skip(id);	/* we do not yet have the lock,
					   so we must do something else */
		}
		work(id);		/* we now have the lock
					   and can do the work */
		omp_unset_lock(&lck);

		X[i] = M_PI * (double)(i + 1) / VECTOR_SIZE;
		A[i] = cos(X[i]) * cos(X[i]);
		B[i] = sin(X[i]) * sin(X[i]);
	}

#pragma omp parallel for shared(alpha,A,B,C,lck) private(i,id) schedule(static,10)
	for (i = 0; i < VECTOR_SIZE; i++) {
		id = omp_get_thread_num();

		omp_set_lock(&lck);
		/* only one thread at a time can execute this printf */
		printf("My thread id is %d working on i=%d\n", id, i);
		omp_unset_lock(&lck);

		while (!omp_test_lock(&lck)) {
			skip(id);	/* we do not yet have the lock,
					   so we must do something else */
		}
		work(id);		/* we now have the lock
					   and can do the work */
		omp_unset_lock(&lck);

		C[i] = alpha * A[i] - B[i];
		printf("i=%d X=%f alpha*A-B=%f\n", i, X[i], C[i]);
	}

	omp_destroy_lock(&lck);

	/* Release host buffers (the original leaked them). */
	free(X);
	free(A);
	free(B);
	free(C);
	return 0;
}
|
fasta.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by Jeremy Zerfas
// rewritten by Аноним Легионов, inspired by fasta Rust #2 program
// use two OpenMP locks instead of one critical section
// decouples IO activity from random number generation
//
// modified by Josh Goldfoot, adding use of a buffer for fasta_repeat
// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60
// This program will generate the random nucleotide sequences in parallel which
// are worked on in blocks of lines. The number of lines in those blocks is
// controlled by this setting.
#define LINES_PER_BLOCK 1024
#define CHARACTERS_PER_BLOCK (MAXIMUM_LINE_WIDTH*LINES_PER_BLOCK)
#define THREADS_TO_USE 4
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
typedef struct
{
char letter;
float probability;
} nucleotide_info;
// Repeatedly print string_To_Repeat until it has printed
// number_Of_Characters_To_Create. The output is also wrapped to
// MAXIMUM_LINE_WIDTH columns.
// Build one full "period" of the wrapped, repeated sequence: the output of
// repeating string_To_Repeat over strlen * MAXIMUM_LINE_WIDTH payload
// characters, with a '\n' after every MAXIMUM_LINE_WIDTH columns.
// Returns a NUL-terminated malloc'ed buffer; the caller must free() it.
// Exits the process on allocation failure.
static char* make_Sequence_Buffer(const char string_To_Repeat[])
{
	const intnative_t string_To_Repeat_Length = strlen(string_To_Repeat);

	// JG: Changed to that this writes a sequence to a buffer, which is used
	intnative_t number_Of_Characters_To_Create = string_To_Repeat_Length * MAXIMUM_LINE_WIDTH;
	// Payload bytes plus one newline per line plus the trailing NUL.
	char* buffer = (char*) malloc(number_Of_Characters_To_Create +
		number_Of_Characters_To_Create / MAXIMUM_LINE_WIDTH + 1);
	if (buffer == NULL)
		exit(-1);
	char* bufferOffset = buffer;

	// Create an extended_String_To_Repeat which is a copy of string_To_Repeat
	// but extended with another copy of the first MAXIMUM_LINE_WIDTH characters
	// of string_To_Repeat appended to the end. Later on this allows us to
	// generate a line of output just by doing simple memory copies using an
	// appropriate offset into extended_String_To_Repeat.
	// (VLA: size is bounded by the fixed alu string plus 60.)
	char extended_String_To_Repeat[string_To_Repeat_Length + MAXIMUM_LINE_WIDTH];
	for (intnative_t column = 0; column<string_To_Repeat_Length + MAXIMUM_LINE_WIDTH;
		column++)
		extended_String_To_Repeat[column] =
			string_To_Repeat[column%string_To_Repeat_Length];
	intnative_t offset = 0;

	char line[MAXIMUM_LINE_WIDTH + 1];
	line[MAXIMUM_LINE_WIDTH] = '\n';

	for (intnative_t current_Number_Of_Characters_To_Create =
		number_Of_Characters_To_Create;
		current_Number_Of_Characters_To_Create>0;)
	{
		// Figure out the length of the line we need to write. If it's less than
		// MAXIMUM_LINE_WIDTH then we also need to add a line feed in the right
		// spot too.
		intnative_t line_Length = MAXIMUM_LINE_WIDTH;
		if (current_Number_Of_Characters_To_Create<MAXIMUM_LINE_WIDTH)
		{
			line_Length = current_Number_Of_Characters_To_Create;
			line[line_Length] = '\n';
		}
		memcpy(line, extended_String_To_Repeat + offset, line_Length);

		// Update the offset, reducing it by string_To_Repeat_Length if
		// necessary.
		offset += line_Length;
		if (offset>string_To_Repeat_Length)
			offset -= string_To_Repeat_Length;

		// "Output" that line to our buffer and update the
		// current_Number_Of_Characters_To_Create.
		memcpy(bufferOffset, line, line_Length + 1); // JG: used to be fwrite(line, line_Length + 1, 1, stdout);
		bufferOffset += line_Length + 1;
		current_Number_Of_Characters_To_Create -= line_Length;
	}

	*bufferOffset = 0;
	return buffer;
}
/*
 * Print number_Of_Characters_To_Create characters of the repeated, wrapped
 * sequence.  The wrapped output is periodic (len(string) * 61 bytes), so we
 * build one period once and stream it repeatedly.
 */
void repeat_And_Wrap_String(const char string_To_Repeat[], intnative_t number_Of_Characters_To_Create)
{
	char* period = make_Sequence_Buffer(string_To_Repeat);
	intnative_t period_Len = (intnative_t) strlen(period);
	/* Payload bytes plus one newline per 60 columns. */
	intnative_t remaining = number_Of_Characters_To_Create +
		number_Of_Characters_To_Create / 60;

	for (; remaining >= period_Len; remaining -= period_Len)
		fwrite(period, period_Len, 1, stdout);

	if (remaining > 0) {
		fwrite(period, remaining, 1, stdout);
		printf("\n");
	}
	free(period);
}
// Generate a pseudorandom number from 0 to max using a linear
// congruential generator.
#define IM 139968
#define IA 3877
#define IC 29573
uint32_t seed = 42;
int rng_tid; //Thread ID
int rng_tnum = 1; //Thread number
intnative_t rng_cnt = 0;
#ifdef _OPENMP
omp_lock_t rng_lock;
#define RNG_LOCK_INIT() omp_init_lock(&rng_lock)
#define RNG_LOCK() omp_set_lock(&rng_lock)
#define RNG_FREE() omp_unset_lock(&rng_lock)
#else
#define RNG_LOCK_INIT() do{}while(0)
#define RNG_LOCK() do{}while(0)
#define RNG_FREE() do{}while(0)
#endif
/* Prepare the RNG turn-taking state: reset the token, create the lock. */
static void rng_init(void)
{
	rng_tid = 0;
	RNG_LOCK_INIT();
}
/*
 * Draw up to `len` values from the single global LCG stream, but only when
 * it is `curr_tid`'s turn -- threads take turns in id order, which keeps the
 * random stream (and therefore the output) deterministic.
 * Returns the number generated (0 once the global budget rng_cnt is spent),
 * or -1 when it is not this thread's turn yet.
 */
static intnative_t rng_gen_blk(uint32_t * buf, intnative_t len, int curr_tid)
{
	intnative_t gen_cnt = -1;//Error by default
	RNG_LOCK();
	if (rng_tid == curr_tid)
	{
		/* Pass the turn token to the next thread (round robin). */
		if (++rng_tid >= rng_tnum)
		{
			rng_tid = 0;
		}
		/* Claim at most the remaining global character budget. */
		gen_cnt = (len<rng_cnt) ? len : rng_cnt;
		rng_cnt -= gen_cnt;
		len = gen_cnt;
		while (0 != len--)
		{
			seed = (seed*IA + IC) % IM;
			*(buf++) = seed;//This is stupid actually!
		}
	}
	RNG_FREE();
	return gen_cnt;
}
int out_tid; //Thread ID
int out_tnum = 1; //Thread number
#ifdef _OPENMP
omp_lock_t out_lock;
#define OUT_LOCK_INIT() omp_init_lock(&out_lock)
#define OUT_LOCK() omp_set_lock(&out_lock)
#define OUT_FREE() omp_unset_lock(&out_lock)
#else
#define OUT_LOCK_INIT() do{}while(0)
#define OUT_LOCK() do{}while(0)
#define OUT_FREE() do{}while(0)
#endif
static void out_init(void)
{
OUT_LOCK_INIT();
rng_tid = 0;
}
/*
 * Write `len` bytes to stdout, but only when it is `curr_tid`'s turn;
 * turns rotate in thread-id order so blocks are emitted in stream order.
 * Returns -1 when it is not our turn, otherwise fwrite's item count
 * (0 = IO error, 1 = OK).
 */
static intnative_t out_write(char * buf, intnative_t len, int curr_tid)
{
	intnative_t wr_cnt = -1;//Error by default
	OUT_LOCK();
	if (out_tid == curr_tid)
	{
		/* Pass the turn token to the next thread (round robin). */
		if (++out_tid >= out_tnum)
		{
			out_tid = 0;
		}
		wr_cnt = fwrite(buf, len, 1, stdout);
	}
	OUT_FREE();
	return wr_cnt; //-1 - thread error, 0 - IO error, 1 - OK
}
/*
 * Emit char_num random nucleotides, chosen via the cumulative probability
 * table built from nucl_info, wrapped at MAXIMUM_LINE_WIDTH columns.
 * Work proceeds in blocks: each thread in turn draws a block of LCG values
 * (rng_gen_blk) and later writes its formatted block (out_write), so the
 * single RNG stream and the output order both stay sequential.
 */
static void generate_And_Wrap_Pseudorandom_DNA_Sequence(
	const nucleotide_info nucl_info[],
	const intnative_t nucl_num,
	const intnative_t char_num)
{
	/* Cumulative thresholds scaled to the LCG modulus IM. */
	uint32_t cumul_p[nucl_num];
	float cumul_acc = 0.0;
	for (intnative_t i = 0; i<nucl_num; i++)
	{
		cumul_acc += nucl_info[i].probability;
		cumul_p[i] = 1ul + (uint32_t)(cumul_acc*(float)IM); //Compensate rounding errors on test file
	}
#ifdef _OPENMP
	intnative_t tnum = omp_get_num_procs();
	if (tnum>THREADS_TO_USE) tnum = THREADS_TO_USE;
	omp_set_num_threads(tnum);
	rng_tnum = tnum;
	out_tnum = tnum;
#endif
	/* Reset both round-robin tokens and the remaining-character budget. */
	rng_tid = 0;
	out_tid = 0;
	rng_cnt = char_num;
#pragma omp parallel
	{
		char block[CHARACTERS_PER_BLOCK + LINES_PER_BLOCK];
		char * line;
		uint32_t rnd[CHARACTERS_PER_BLOCK], r;
		intnative_t cnt, col, prid, nid, ncnt;
		int cur_tid;
#ifdef _OPENMP
		cur_tid = omp_get_thread_num();
#else
		cur_tid = 0;
#endif
		while (1)
		{
			/* Spin until it is our turn to draw random values. */
			do
			{
				cnt = rng_gen_blk(rnd, CHARACTERS_PER_BLOCK, cur_tid);
			} while (-1 == cnt);
			if (0 == cnt)
			{
				break;//Work finished!
			}
			line = block;
			/* Translate each random value to a letter by counting how
			   many cumulative thresholds it reaches. */
			for (col = 0, prid = 0; prid < cnt; prid++)
			{
				r = rnd[prid];
				ncnt = 0;
				for (nid = 0; nid < nucl_num; nid++)
				{
					if (cumul_p[nid] <= r)
					{
						ncnt++;
					}
				}
				*line++ = nucl_info[ncnt].letter;
				if (++col >= MAXIMUM_LINE_WIDTH)
				{
					col = 0;
					*line++ = '\n';
				}
			}
			//Check if we need to end the line
			if (0 != col)
			{
				//Last iteration didn't end the line, so finish the job.
				*line++ = '\n';
			}
			//Print results (spin until it is our turn to write)
			do
			{
				cnt = out_write(block, line - block, cur_tid);
			} while (-1 == cnt);
			//Check for IO error
			if (0 == cnt)
			{
				exit(1);
			}
		}
	}
}
/*
 * fasta benchmark driver: prints the repeated alu sequence, then two
 * pseudorandom sequences (IUB codes and human frequencies), each scaled
 * by the length argument n.
 * FIX: the original called atoi(argv[1]) without checking argc, which is
 * undefined behavior when the program is run with no argument.
 */
int main(int argc, char ** argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <sequence-length>\n", argv[0]);
		return 1;
	}
	const intnative_t n = atoi(argv[1]);

	fputs(">ONE Homo sapiens alu\n", stdout);
	const char homo_Sapiens_Alu[] =
		"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTC"
		"AGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCG"
		"TGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGG"
		"AGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
	repeat_And_Wrap_String(homo_Sapiens_Alu, 2 * n);

	rng_init();
	out_init();

	fputs(">TWO IUB ambiguity codes\n", stdout);
	nucleotide_info iub_Nucleotides_Information[] =
	{
		{ 'a', 0.27 },{ 'c', 0.12 },{ 'g', 0.12 },{ 't', 0.27 },{ 'B', 0.02 },
		{ 'D', 0.02 },{ 'H', 0.02 },{ 'K', 0.02 },{ 'M', 0.02 },{ 'N', 0.02 },
		{ 'R', 0.02 },{ 'S', 0.02 },{ 'V', 0.02 },{ 'W', 0.02 },{ 'Y', 0.02 }
	};
	generate_And_Wrap_Pseudorandom_DNA_Sequence(iub_Nucleotides_Information,
		sizeof(iub_Nucleotides_Information) / sizeof(nucleotide_info), 3 * n);

	fputs(">THREE Homo sapiens frequency\n", stdout);
	nucleotide_info homo_Sapien_Nucleotides_Information[] =
	{
		{ 'a', 0.3029549426680 },{ 'c', 0.1979883004921 },
		{ 'g', 0.1975473066391 },{ 't', 0.3015094502008 }
	};
	generate_And_Wrap_Pseudorandom_DNA_Sequence(homo_Sapien_Nucleotides_Information,
		sizeof(homo_Sapien_Nucleotides_Information) / sizeof(nucleotide_info), 5 * n);

	return 0;
}
|
GB_unop__ainv_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_fc32_fc32
// op(A') function: GB_unop_tran__ainv_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_FC32_ainv (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC32_ainv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_FC32_ainv (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_FC32_ainv (Ax [p]) for all anz entries, in parallel.
// Auto-generated (see header note); real edits belong in Generator/.
GrB_Info GB_unop_apply__ainv_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;            // cast (identity for fc32 -> fc32)
        Cx [p] = GB_FC32_ainv (z) ;     // apply the unary operator
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ainv (cast (A')): transpose A and apply the unary op per entry.
// The heavy lifting is done by the shared template, driven by the GB_*
// macros defined earlier in this file.  Auto-generated; do not hand-edit.
GrB_Info GB_unop_tran__ainv_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int16
// op(A') function: GB_tran__lnot_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) !(Ax [p] != 0) for all anz entries, in parallel.
// Auto-generated (see header note); real edits belong in Generator/.
GrB_Info GB_unop__lnot_fp32_int16
(
    float *Cx,          // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,        // number of entries
    int nthreads        // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // get aij, cast to float, apply lnot, store in Cx [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose int16 A, typecast to float, apply !(x!=0).
// The shared template does the work via the GB_* macros defined above.
// Auto-generated; do not hand-edit.
GrB_Info GB_tran__lnot_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
}
#endif
|
ast-dump-openmp-begin-declare-variant_6.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int also_before(void) { // base definition; an ibm-vendor variant follows below
return 0;
}
#pragma omp begin declare variant match(implementation={vendor(ibm)})
int also_after(void) { // selected only when implementation vendor is ibm
return 1;
}
int also_before(void) { // ibm-vendor specialization of also_before
return 2;
}
#pragma omp end declare variant
int also_after(void) { // base definition, used since vendor(ibm) never matches
return 0;
}
int main(void) { // both calls must resolve to the base definitions
// Should return 0.
return also_after() + also_before();
}
// Make sure:
// - we see the specialization in the AST
// - we do use the original pointers for the calls as the variants are not applicable (this is not the ibm compiler).
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(ibm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(ibm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(ibm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 main 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:16, line:25:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_31:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
|
dynamic_module_load.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu
#ifdef SHARED
#include <stdio.h>
// Runs an empty target region -- its only purpose is to force the offload
// runtime to initialize when foo() is invoked from a dlopen'ed shared
// object -- then prints DONE. for the FileCheck pattern below.  Returns 0.
int foo() {
#pragma omp target
  ;
  printf("%s\n", "DONE.");
  return 0;
}
#else
#include <dlfcn.h>
#include <stdio.h>
/*
 * Host side of the test: dlopen the offloading shared object given as
 * argv[1], look up foo, and call it.
 * FIX: after dlsym the original re-tested `Handle` instead of `Foo`, so a
 * failed symbol lookup went undetected and the indirect call would crash.
 */
int main(int argc, char **argv) {
  void *Handle = dlopen(argv[1], RTLD_NOW);
  int (*Foo)(void);

  if (Handle == NULL) {
    printf("dlopen() failed: %s\n", dlerror());
    return 1;
  }
  Foo = (int (*)(void)) dlsym(Handle, "foo");
  if (Foo == NULL) {
    printf("dlsym() failed: %s\n", dlerror());
    return 1;
  }
  // CHECK: DONE.
  // CHECK-NOT: {{abort|fault}}
  return Foo();
}
#endif
|
pintgr.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <stdio.h>
#include "applu.incl"
//---------------------------------------------------------------------
// pintgr: compute the surface integral 'frc' over three pairs of faces
// of the sub-domain using the trapezoidal rule.  On each face the
// integrand phi is derived from the conserved-variable array u as
// C2 * (u[4] - 0.5*(u[1]^2+u[2]^2+u[3]^2)/u[0]) (a pressure-like
// quantity).  The three partial integrals frc1..frc3 are combined into
// the global result frc at the end.
//---------------------------------------------------------------------
void pintgr()
{
//---------------------------------------------------------------------
// local variables
//---------------------------------------------------------------------
int i, j, k;
int ibeg, ifin, ifin1;
int jbeg, jfin, jfin1;
// Scratch planes shared by all threads; reused for each of the three
// face pairs below.  The implicit barriers at the '#pragma omp single'
// regions make the reuse race-free (see comments in the parallel region).
double phi1[ISIZ3+2][ISIZ2+2];
double phi2[ISIZ3+2][ISIZ2+2];
double frc1, frc2, frc3;
//---------------------------------------------------------------------
// set up the sub-domains for integration in each processor
//---------------------------------------------------------------------
ibeg = ii1;
ifin = ii2;
jbeg = ji1;
jfin = ji2;
ifin1 = ifin - 1;
jfin1 = jfin - 1;
#pragma omp parallel default(shared) private(i,j,k) \
shared(ki1,ki2,ifin,ibeg,jfin,jbeg,ifin1,jfin1)
{
// Face pair 1: bottom (k = ki1) and top (k = ki2-1) planes.
// 'nowait' is safe here: each thread reaches the implicit barrier at
// the end of the 'single' below only after finishing its share of this
// loop, so the phi writes are ordered before the reduction reads them.
#pragma omp for nowait
for (j = jbeg; j < jfin; j++) {
for (i = ibeg; i < ifin; i++) {
k = ki1;
phi1[j][i] = C2*( u[k][j][i][4]
- 0.50 * ( u[k][j][i][1] * u[k][j][i][1]
+ u[k][j][i][2] * u[k][j][i][2]
+ u[k][j][i][3] * u[k][j][i][3] )
/ u[k][j][i][0] );
k = ki2 - 1;
phi2[j][i] = C2*( u[k][j][i][4]
- 0.50 * ( u[k][j][i][1] * u[k][j][i][1]
+ u[k][j][i][2] * u[k][j][i][2]
+ u[k][j][i][3] * u[k][j][i][3] )
/ u[k][j][i][0] );
}
}
// One thread clears the accumulator; the implicit barrier at the end
// of this 'single' also synchronizes the phi writes above.
#pragma omp single
frc1 = 0.0;
// Trapezoidal sum over both k-faces; implicit barrier at loop end.
#pragma omp for reduction(+:frc1)
for (j = jbeg; j < jfin1; j++) {
for (i = ibeg; i < ifin1; i++) {
frc1 = frc1 + ( phi1[j][i]
+ phi1[j][i+1]
+ phi1[j+1][i]
+ phi1[j+1][i+1]
+ phi2[j][i]
+ phi2[j][i+1]
+ phi2[j+1][i]
+ phi2[j+1][i+1] );
}
}
// Scale by the cell area.  'nowait' is safe: frc1 is not read again
// until after the parallel region ends.
#pragma omp single nowait
frc1 = dxi * deta * frc1;
// Face pair 2: j = jbeg and j = jfin-1 planes (phi indexed by [k][i]).
#pragma omp for nowait
for (k = ki1; k < ki2; k++) {
for (i = ibeg; i < ifin; i++) {
phi1[k][i] = C2*( u[k][jbeg][i][4]
- 0.50 * ( u[k][jbeg][i][1] * u[k][jbeg][i][1]
+ u[k][jbeg][i][2] * u[k][jbeg][i][2]
+ u[k][jbeg][i][3] * u[k][jbeg][i][3] )
/ u[k][jbeg][i][0] );
}
}
#pragma omp for nowait
for (k = ki1; k < ki2; k++) {
for (i = ibeg; i < ifin; i++) {
phi2[k][i] = C2*( u[k][jfin-1][i][4]
- 0.50 * ( u[k][jfin-1][i][1] * u[k][jfin-1][i][1]
+ u[k][jfin-1][i][2] * u[k][jfin-1][i][2]
+ u[k][jfin-1][i][3] * u[k][jfin-1][i][3] )
/ u[k][jfin-1][i][0] );
}
}
// End-of-'single' barrier again orders the phi writes above before the
// frc2 reduction below.
#pragma omp single
frc2 = 0.0;
#pragma omp for reduction(+:frc2)
for (k = ki1; k < ki2-1; k++) {
for (i = ibeg; i < ifin1; i++) {
frc2 = frc2 + ( phi1[k][i]
+ phi1[k][i+1]
+ phi1[k+1][i]
+ phi1[k+1][i+1]
+ phi2[k][i]
+ phi2[k][i+1]
+ phi2[k+1][i]
+ phi2[k+1][i+1] );
}
}
#pragma omp single nowait
frc2 = dxi * dzeta * frc2;
// Face pair 3: i = ibeg and i = ifin-1 planes (phi indexed by [k][j]).
#pragma omp for nowait
for (k = ki1; k < ki2; k++) {
for (j = jbeg; j < jfin; j++) {
phi1[k][j] = C2*( u[k][j][ibeg][4]
- 0.50 * ( u[k][j][ibeg][1] * u[k][j][ibeg][1]
+ u[k][j][ibeg][2] * u[k][j][ibeg][2]
+ u[k][j][ibeg][3] * u[k][j][ibeg][3] )
/ u[k][j][ibeg][0] );
}
}
#pragma omp for nowait
for (k = ki1; k < ki2; k++) {
for (j = jbeg; j < jfin; j++) {
phi2[k][j] = C2*( u[k][j][ifin-1][4]
- 0.50 * ( u[k][j][ifin-1][1] * u[k][j][ifin-1][1]
+ u[k][j][ifin-1][2] * u[k][j][ifin-1][2]
+ u[k][j][ifin-1][3] * u[k][j][ifin-1][3] )
/ u[k][j][ifin-1][0] );
}
}
#pragma omp single
frc3 = 0.0;
#pragma omp for reduction(+:frc3)
for (k = ki1; k < ki2-1; k++) {
for (j = jbeg; j < jfin1; j++) {
frc3 = frc3 + ( phi1[k][j]
+ phi1[k][j+1]
+ phi1[k+1][j]
+ phi1[k+1][j+1]
+ phi2[k][j]
+ phi2[k][j+1]
+ phi2[k+1][j]
+ phi2[k+1][j+1] );
}
}
#pragma omp single nowait
frc3 = deta * dzeta * frc3;
} //end parallel
// Combine the three face integrals (the 0.25 factor belongs to the
// 2-D trapezoidal rule applied on each face).
frc = 0.25 * ( frc1 + frc2 + frc3 );
//printf("\n\n surface integral = %12.5E\n\n\n", frc);
}
|
quadip.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
// Strided element accessors: read/write element (i) or (i,j) of a
// PyArrayObject assuming the data is double-typed.  No bounds or dtype
// checking is performed.
// NOTE(review): these use the deprecated direct a->data / a->strides
// struct access; newer NumPy C-API code should use
// PyArray_DATA()/PyArray_STRIDES() -- confirm against the NumPy version
// this module targets.
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
//#define IND_arr(a,i) (PyArrayObject *)(a->data+i*a->strides[0])
#define IND2(a,i,j) *((double *)(a->data+i*a->strides[0]+j*a->strides[1]))
// Forward declaration so the method table below can reference quadip.
static PyObject *quadip(PyObject *self, PyObject *args, PyObject *keywds);
/*
 * quadip(ipparams, position, etc=None)
 *
 * Evaluate a 2-D quadratic intra-pixel sensitivity model at each
 * position:
 *
 *     out[i] = a*y[i]^2 + b*x[i]^2 + c*x[i]*y[i] + d*y[i] + e*x[i] + f
 *
 * where y = position[0] and x = position[1].
 *
 * ipparams : 1-D double ndarray holding [a, b, c, d, e, f]
 * position : 2-D double ndarray of shape (2, n); row 0 is y, row 1 is x
 *            (assumed double-typed and at least that large -- the IND/
 *            IND2 accessors do no type or shape checking; TODO confirm
 *            callers guarantee this)
 * etc      : optional and unused
 *
 * Returns a new 1-D double ndarray of length n, or NULL with a Python
 * exception set on failure.
 */
static PyObject *quadip(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc = NULL;   /* optional arg: ParseTuple leaves it unset when omitted */
PyArrayObject *out, *ipparams, *position;
double a,b,c,d,e,f;
npy_intp i;             /* npy_intp (not int): index cannot overflow for large n */
npy_intp dims[1];
static char *kwlist[] = {"ipparams","position","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&ipparams,&position,&etc))
{
return NULL;
}
/* Unpack the six model coefficients. */
a = IND(ipparams,0);
b = IND(ipparams,1);
c = IND(ipparams,2);
d = IND(ipparams,3);
e = IND(ipparams,4);
f = IND(ipparams,5);
dims[0] = PyArray_DIM(position, 1);
out = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
/* Bug fix: PyArray_SimpleNew can fail (e.g. out of memory) and return
 * NULL with an exception set; the original code dereferenced the
 * result unconditionally. */
if (out == NULL)
{
return NULL;
}
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(out,i) = a*pow(IND2(position,0,i),2)+b*pow(IND2(position,1,i),2)+ \
c*IND2(position,0,i)*IND2(position,1,i)+d*IND2(position,0,i)+e*IND2(position,1,i)+f;
}
return PyArray_Return(out);
}
// Module/function docstring exposed to Python (shared as the doc for
// the single method in the table below).
static char module_docstring[]="\
This function fits the intra-pixel sensitivity effect using a 2D quadratic.\n\
\n\
Parameters\n\
----------\n\
a: quadratic coefficient in y\n\
b: quadratic coefficient in x\n\
c: coefficient for cross-term\n\
d: linear coefficient in y\n\
e: linear coefficient in x\n\
f: constant\n\
\n\
Returns\n\
-------\n\
returns the flux values for the intra-pixel model\n\
\n\
Revisions\n\
---------\n\
2008-07-05 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2011-01-05 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to c extention function\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
\n\
";
// Method table: one entry exposing quadip(); METH_KEYWORDS enables the
// keyword-argument parsing done inside quadip().  The {NULL} entry
// terminates the table.
static PyMethodDef module_methods[] = {
{"quadip",(PyCFunction)quadip,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
// Module initialization.  One function body serves both Python 3
// (PyInit_quadip, returns the module object) and Python 2 (initquadip,
// returns void); the preprocessor selects the matching name and body.
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_quadip(void)
#else
initquadip(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
// Py3 module definition; static so it outlives this call.
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"quadip", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size: -1 = module keeps state in globals, no sub-interpreters */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
// NOTE(review): import_array() can itself fail and (on Py3) expands to
// a return on error -- behavior depends on the NumPy version; confirm.
import_array();
return module;
#else
PyObject *m = Py_InitModule3("quadip", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
zq_cnn_convolution_gemm_nchwc_kernel1x1_neon_raw.h | #define __ARMV8 1
// Repack the 1x1-convolution weight matrix for the sgemm micro-kernels.
// Output channels are processed in groups of 8 (ARMv8 only), then 4,
// then singly; within a group the weights are interleaved so that, for
// every input channel q, the q-th weight of each output channel in the
// group is stored contiguously.  The source layout is row-major
// [outch][inch], i.e. weight(oc, q) = kernel[oc * inch + q].
static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const float* src = _kernel;
    // interleave
#if __ARMV8
    kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4);
#endif // __ARM_NEON && __ARMV8
    int oc = 0;
#if __ARMV8
    // Groups of 8 output channels.
    for (; oc + 7 < outch; oc += 8)
    {
        float* dst = kernel_tm.channel(oc / 8);
        for (int q = 0; q < inch; q++)
        {
            // One weight from each of the 8 channels, back to back.
            for (int lane = 0; lane < 8; lane++)
            {
                dst[lane] = src[(oc + lane) * inch + q];
            }
            dst += 8;
        }
    }
#endif // __ARMV8
    // Groups of 4 output channels.
    for (; oc + 3 < outch; oc += 4)
    {
#if __ARMV8
        float* dst = kernel_tm.channel(oc / 8 + (oc % 8) / 4);
#else
        float* dst = kernel_tm.channel(oc / 4);
#endif // __ARMV8
        for (int q = 0; q < inch; q++)
        {
            for (int lane = 0; lane < 4; lane++)
            {
                dst[lane] = src[(oc + lane) * inch + q];
            }
            dst += 4;
        }
    }
    // Leftover single output channels: straight copy of the row.
    for (; oc < outch; oc++)
    {
#if __ARMV8
        float* dst = kernel_tm.channel(oc / 8 + (oc % 8) / 4 + oc % 4);
#else
        float* dst = kernel_tm.channel(oc / 4 + oc % 4);
#endif //__ARMV8
        for (int q = 0; q < inch; q++)
        {
            dst[q] = src[oc * inch + q];
        }
    }
}
static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 4u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4));
tmpptr += 8;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q<inch; q++)
{
#if __ARMV8
vst1q_f32(tmpptr, vld1q_f32(img0));
tmpptr += 4;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
img0 += bottom_blob.cstep;
#endif // __ARMV8
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i<size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARMV8
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 8);
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
}
#endif // __ARMV8
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = { 0.f, 0.f, 0.f, 0.f };
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __ARMV8
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
#else
const float* kptr = kernel.channel(p / 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"
);
#else // __ARMV8
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"
);
#endif // __ARMV8
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float* outptr0 = out0;
int i = 0;
for (; i + 7<size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __ARMV8
}
for (; i + 3<size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
#if __ARMV8
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __ARMV8
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __ARMV8
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __ARMV8
const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const float* kptr = kernel.channel(p / 4 + p % 4);
#endif // __ARMV8
int q = 0;
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q + 3<inch; q += 4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __ARMV8
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __ARMV8
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
%                  MagickCore Methods to Constitute an Image                  %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    map_length;

  /*
    Allocate the image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the image depth from the caller's storage type.
  */
  if (storage == CharPixel)
    image->depth=8*sizeof(unsigned char);
  else if (storage == DoublePixel)
    image->depth=8*sizeof(double);
  else if (storage == FloatPixel)
    image->depth=8*sizeof(float);
  else if (storage == LongPixel)
    image->depth=8*sizeof(unsigned long);
  else if (storage == LongLongPixel)
    image->depth=8*sizeof(MagickSizeType);
  else if (storage == ShortPixel)
    image->depth=8*sizeof(unsigned short);
  /*
    Infer alpha trait and colorspace from the channel map: any alpha/opacity
    channel enables blending, any CMYK channel selects CMYKColorspace, an
    intensity channel (or a single-channel map) selects GRAYColorspace.
  */
  map_length=strlen(map);
  for (j=0; j < (ssize_t) map_length; j++)
  {
    int
      c;

    c=map[j];
    if (strchr("AaOo",c) != (char *) NULL)
      image->alpha_trait=BlendPixelTrait;
    else if (strchr("CcMmYyKk",c) != (char *) NULL)
      image->colorspace=CMYKColorspace;
    else if ((c == 'I') || (c == 'i'))
      image->colorspace=GRAYColorspace;
    else if (map_length == 1)
      image->colorspace=GRAYColorspace;
  }
  /*
    Size the canvas, clear the pixel cache, then import the supplied pixels.
  */
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  Stream handler used by PingImage(): discards the supplied pixel data and
  reports the whole scanline as consumed, so only image properties are read.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *clone_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Read with the ping flag set and a discarding stream handler so the
    coder skips pixel data and only the image properties are populated.
  */
  clone_info=CloneImageInfo(image_info);
  clone_info->ping=MagickTrue;
  image=ReadStream(clone_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (clone_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  clone_info=DestroyImageInfo(clone_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping an image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) == 0)
    return(PingImage(image_info,exception));  /* no embedded scene syntax */
  {
    ExceptionInfo
      *sans;

    ssize_t
      scene,
      scene_end;

    /*
      Images of the form image-%d.png[1-5]: ping each scene separately.
    */
    read_info=CloneImageInfo(image_info);
    sans=AcquireExceptionInfo();
    (void) SetImageInfo(read_info,0,sans);
    sans=DestroyExceptionInfo(sans);
    if (read_info->number_scenes == 0)
      {
        read_info=DestroyImageInfo(read_info);
        return(PingImage(image_info,exception));
      }
    (void) CopyMagickString(ping_filename,read_info->filename,
      MagickPathExtent);
    images=NewImageList();
    scene_end=(ssize_t) (read_info->scene+read_info->number_scenes);
    for (scene=(ssize_t) read_info->scene; scene < scene_end; scene++)
    {
      (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
        (int) scene,read_info->filename,exception);
      image=PingImage(read_info,exception);
      if (image == (Image *) NULL)
        continue;  /* skip scenes that fail to ping */
      AppendImageToList(&images,image);
    }
    read_info=DestroyImageInfo(read_info);
    return(images);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Returns MagickTrue when the security policy grants the requested rights
  for the named coder; otherwise sets errno to EPERM, raises a PolicyError
  exception, and returns MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  /* Keep copies of the original filename/magick: SetImageInfo() below may
     rewrite read_info->filename (e.g. stripping scene/extract syntax). */
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  /* Policy errors must not be swallowed: re-query so they land in the
     caller's exception. */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(read_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Probe host byte order for raw formats with no declared
               endianness. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /* Open the blob up-front; if it is not seekable, spool it to a
         temporary file so the coder can seek (read_info->temporary marks
         the spool file for later cleanup). */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* No native decoder: if there is no delegate either, retry the
         filename-based lookup with the untouched filename. */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      /* Serialize non-thread-safe coders on the module semaphore. */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      /* The delegate converts the input into a format we can decode; its
         output file becomes the new (temporary) read target. */
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* Remove any spool/delegate temporary and restore the original
         filename on the result. */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /* Scene selection syntax (e.g. [1-5]): keep only requested frames. */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: sync metadata-derived attributes and apply
    read-time options (caption/comment/label/extract/delay/dispose).
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];

    const char
      *option;

    const StringInfo
      *profile;

    ssize_t
      option_type;

    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if (*magick_path == '\0' && *next->magick == '\0')
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Force eager parsing of embedded metadata profiles. */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    value=GetImageProperty(next,"exif:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        /* A comma indicates a "num,den" rational rather than a geometry. */
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        /* -extract geometry: crop when an offset is given, otherwise
           resize to the requested width/height. */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): the icc/icm lookup result is immediately overwritten
       by the iptc/8bim lookup and `profile` is never read afterwards —
       presumably these calls are kept for their lazy profile-sync side
       effects; confirm before removing. */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    /* Reproducible builds: when SOURCE_DATE_EPOCH is set, skip recording
       filesystem timestamps (env is read once per process). */
    if (epoch_initalized == MagickFalse)
      {
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    if (source_date_epoch == (const char *) NULL)
      {
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          MagickPathExtent,timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          MagickPathExtent,timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            /* ">" only lowers delays above the threshold. */
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /* "<" only raises the tick rate for delays below the
                 threshold. */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    scene_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read an image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,scene_filename,exception);
  if (LocaleCompare(scene_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      /*
        Embedded scene syntax of the form image-%d.png[1-5]: read each
        scene as a separate file and append it to the list.
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          ssize_t
            scene,
            scene_end;

          (void) CopyMagickString(scene_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          scene_end=(ssize_t) (read_info->scene+read_info->number_scenes);
          for (scene=(ssize_t) read_info->scene; scene < scene_end; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              scene_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;  /* skip scenes that fail to read */
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /*
    Single read: no scene expansion required.
  */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  register const char
    *p;

  size_t
    length;

  unsigned char
    *blob;

  /*
    Skip over the data-URI header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  p=content;
  while ((*p != ',') && (*p != '\0'))
    p++;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  /*
    Decode the Base64 payload that follows the comma and read it as a blob.
  */
  length=0;
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file is on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
const char
*option;
const DelegateInfo
*delegate_info;
const MagickInfo
*magick_info;
EncodeImageHandler
*encoder;
ExceptionInfo
*sans_exception;
ImageInfo
*write_info;
MagickBooleanType
status,
temporary;
/*
Determine image type from filename prefix or suffix (e.g. image.jpg).
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
/*
Work on a clone of image_info; SetImageInfo() resolves the output format
into write_info->magick from the filename prefix/suffix.
*/
sans_exception=AcquireExceptionInfo();
write_info=CloneImageInfo(image_info);
(void) CopyMagickString(write_info->filename,image->filename,
MagickPathExtent);
(void) SetImageInfo(write_info,1,sans_exception);
if (*write_info->magick == '\0')
(void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
/* Save the caller's filename so the delegate paths can restore it. */
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
(void) CopyMagickString(image->filename,write_info->filename,
MagickPathExtent);
/*
Call appropriate image writer based on image type.
*/
magick_info=GetMagickInfo(write_info->magick,sans_exception);
/* Re-query with the real exception so policy errors reach the caller. */
if (sans_exception->severity == PolicyError)
magick_info=GetMagickInfo(write_info->magick,exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (magick_info != (const MagickInfo *) NULL)
{
if (GetMagickEndianSupport(magick_info) == MagickFalse)
image->endian=UndefinedEndian;
else
if ((image_info->endian == UndefinedEndian) &&
(GetMagickRawSupport(magick_info) != MagickFalse))
{
unsigned long
lsb_first;
/* Probe host byte order; raw formats default to the native endian. */
lsb_first=1;
image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
}
}
(void) SyncImageProfiles(image);
DisassociateImageStream(image);
option=GetImageOption(image_info,"delegate:bimodal");
/*
Bi-modal delegate: hand a single, untainted input file directly to an
external program that converts image->magick to write_info->magick.
*/
if ((IsStringTrue(option) != MagickFalse) &&
(write_info->page == (char *) NULL) &&
(GetPreviousImageInList(image) == (Image *) NULL) &&
(GetNextImageInList(image) == (Image *) NULL) &&
(IsTaintImage(image) == MagickFalse) )
{
delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
if ((delegate_info != (const DelegateInfo *) NULL) &&
(GetDelegateMode(delegate_info) == 0) &&
(IsPathAccessible(image->magick_filename) != MagickFalse))
{
/*
Process image with bi-modal delegate.
*/
(void) CopyMagickString(image->filename,image->magick_filename,
MagickPathExtent);
status=InvokeDelegate(write_info,image,image->magick,
write_info->magick,exception);
write_info=DestroyImageInfo(write_info);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
return(status);
}
}
status=MagickFalse;
temporary=MagickFalse;
/*
Encoders that require seeking cannot write to a non-seekable stream:
detect that case and divert output to a temporary file instead.
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
{
char
image_filename[MagickPathExtent];
(void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
(void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
if (status != MagickFalse)
{
if (IsBlobSeekable(image) == MagickFalse)
{
/*
A seekable stream is required by the encoder.
*/
write_info->adjoin=MagickTrue;
(void) CopyMagickString(write_info->filename,image->filename,
MagickPathExtent);
(void) AcquireUniqueFilename(image->filename);
temporary=MagickTrue;
}
(void) CloseBlob(image);
}
}
encoder=GetImageEncoder(magick_info);
if (encoder != (EncodeImageHandler *) NULL)
{
/*
Call appropriate image writer based on image type.
*/
/* Serialize encoders that are not thread-safe. */
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
if (status != MagickFalse)
status=encoder(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
else
{
delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
if (delegate_info != (DelegateInfo *) NULL)
{
/*
Process the image with delegate.
*/
*write_info->filename='\0';
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
LockSemaphoreInfo(delegate_info->semaphore);
status=InvokeDelegate(write_info,image,(char *) NULL,
write_info->magick,exception);
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
UnlockSemaphoreInfo(delegate_info->semaphore);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
}
else
{
/*
No encoder and no delegate: fall back to the image's own format,
then the filename extension, before giving up with an exception.
*/
sans_exception=AcquireExceptionInfo();
magick_info=GetMagickInfo(write_info->magick,sans_exception);
if (sans_exception->severity == PolicyError)
magick_info=GetMagickInfo(write_info->magick,exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if ((write_info->affirm == MagickFalse) &&
(magick_info == (const MagickInfo *) NULL))
{
(void) CopyMagickString(write_info->magick,image->magick,
MagickPathExtent);
magick_info=GetMagickInfo(write_info->magick,exception);
}
encoder=GetImageEncoder(magick_info);
if (encoder == (EncodeImageHandler *) NULL)
{
char
extension[MagickPathExtent];
GetPathComponent(image->filename,ExtensionPath,extension);
if (*extension != '\0')
magick_info=GetMagickInfo(extension,exception);
else
magick_info=GetMagickInfo(image->magick,exception);
(void) CopyMagickString(image->filename,filename,
MagickPathExtent);
encoder=GetImageEncoder(magick_info);
}
if (encoder == (EncodeImageHandler *) NULL)
{
magick_info=GetMagickInfo(image->magick,exception);
encoder=GetImageEncoder(magick_info);
if (encoder == (EncodeImageHandler *) NULL)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
"`%s'",write_info->magick);
}
if (encoder != (EncodeImageHandler *) NULL)
{
/*
Call appropriate image writer based on image type.
*/
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
exception);
if (status != MagickFalse)
status=encoder(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
}
}
if (temporary != MagickFalse)
{
/*
Copy temporary image file to permanent.
*/
status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
if (status != MagickFalse)
{
(void) RelinquishUniqueFileResource(write_info->filename);
status=ImageToFile(image,write_info->filename,exception);
}
(void) CloseBlob(image);
(void) RelinquishUniqueFileResource(image->filename);
(void) CopyMagickString(image->filename,write_info->filename,
MagickPathExtent);
}
/* "info" writes only metadata; otherwise honor the verbose option. */
if ((LocaleCompare(write_info->magick,"info") != 0) &&
(write_info->verbose != MagickFalse))
(void) IdentifyImage(image,stdout,MagickFalse,exception);
write_info=DestroyImageInfo(write_info);
if (GetBlobError(image) != MagickFalse)
ThrowWriterException(FileOpenError,"UnableToWriteFile");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"
ExceptionInfo
*sans_exception;
ImageInfo
*write_info;
MagickBooleanType
proceed;
MagickOffsetType
progress;
MagickProgressMonitor
progress_monitor;
MagickSizeType
number_images;
MagickStatusType
status;
register Image
*p;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
write_info=CloneImageInfo(image_info);
*write_info->magick='\0';
images=GetFirstImageInList(images);
/* An explicit filename argument overrides every frame's filename. */
if (filename != (const char *) NULL)
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
(void) CopyMagickString(p->filename,filename,MagickPathExtent);
(void) CopyMagickString(write_info->filename,images->filename,
MagickPathExtent);
sans_exception=AcquireExceptionInfo();
(void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (*write_info->magick == '\0')
(void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
p=images;
/*
If scene numbers are not strictly increasing, renumber the whole list
so each frame gets a unique, consecutive scene number.
*/
for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
{
register Image
*next;
next=GetNextImageInList(p);
if (next == (Image *) NULL)
break;
if (p->scene >= next->scene)
{
register ssize_t
i;
/*
Generate consistent scene numbers.
*/
i=(ssize_t) images->scene;
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
p->scene=(size_t) i++;
break;
}
}
/*
Write images.
*/
status=MagickTrue;
progress_monitor=(MagickProgressMonitor) NULL;
progress=0;
number_images=GetImageListLength(images);
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
{
/* Suppress per-frame monitors; overall progress is reported below. */
if (number_images != 1)
progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
p->client_data);
status&=WriteImage(write_info,p,exception);
if (number_images != 1)
(void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
/* With adjoin, WriteImage() wrote the whole sequence in one call. */
if (write_info->adjoin != MagickFalse)
break;
if (number_images != 1)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
if (proceed == MagickFalse)
break;
}
}
write_info=DestroyImageInfo(write_info);
return(status != 0 ? MagickTrue : MagickFalse);
}
|
GB_binop__band_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_int32
// A.*B function (eWiseMult): GB_AemultB__band_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__band_int32
// C+=b function (dense accum): GB_Cdense_accumb__band_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_int32
// C=scalar+B GB_bind1st__band_int32
// C=scalar+B' GB_bind1st_tran__band_int32
// C=A+scalar GB_bind2nd__band_int32
// C=A'+scalar GB_bind2nd_tran__band_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT32 || GxB_NO_BAND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized via the GB_* macros (BAND operator, int32).
GrB_Info GB_Cdense_ewise3_noaccum__band_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition B's
// entries across ntasks tasks for the templated loop.
GrB_Info GB_Cdense_accumB__band_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as an untyped pointer.
GrB_Info GB_Cdense_accumb__band_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns; this is an
// artifact of the code generator and is kept byte-identical here.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Row scale (C = D*B) is not generated for the BAND operator; this stub is
// compiled out.  Fixed the generator typo "(node)" -> "(none)" so the
// placeholder name matches the colscale stub above and the header comment
// convention for disabled kernels.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, specialized for the BAND int32 operator.
// The slice workspaces are allocated inside the template and released by
// GB_FREE_ALL (defined just above this function).
GrB_Info GB_AaddB__band_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, specialized for the BAND int32
// operator; workspace lifetime matches GB_AaddB above.
GrB_Info GB_AemultB__band_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p] for every entry present in B (Bb is B's bitmap, or
// NULL when B is not bitmap).
GrB_Info GB_bind1st__band_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            int32_t bij = Bx [p] ;
            Cx [p] = (x) & (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] & y for every entry present in A (Ab is A's bitmap, or
// NULL when A is not bitmap).
GrB_Info GB_bind2nd__band_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int32_t aij = Ax [p] ;
            Cx [p] = (aij) & (y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A and apply z = x & aij via GB_CAST_OP, which
// the transpose template invokes per entry.
GrB_Info GB_bind1st_tran__band_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A and apply z = aij & y via GB_CAST_OP (defined
// just above), which the transpose template invokes per entry.
GrB_Info GB_bind2nd_tran__band_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "api/libperf.h"
#include "lib/libperf_int.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
# include <mpi.h>
#elif HAVE_RTE
# include<rte.h>
#endif
#define MAX_BATCH_FILES 32
#define TL_RESOURCE_NAME_NONE "<none>"
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"
/* Bit flags stored in perftest_context.flags controlling output and
 * behavior. */
enum {
TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),  /* emit measurement rows */
TEST_FLAG_PRINT_TEST = UCS_BIT(1),     /* emit the test-description banner */
TEST_FLAG_SET_AFFINITY = UCS_BIT(8),   /* request CPU affinity -- handled outside this chunk; confirm */
TEST_FLAG_NUMERIC_FMT = UCS_BIT(9),    /* thousands-separated numeric format */
TEST_FLAG_PRINT_FINAL = UCS_BIT(10),   /* suppress intermediate reports; final only */
TEST_FLAG_PRINT_CSV = UCS_BIT(11)      /* CSV output instead of the table */
};
/* Socket-based runtime-group state: the out-of-band connection used to
 * exchange test setup between client and server. */
typedef struct sock_rte_group {
int is_server;   /* nonzero on the server side of the connection */
int connfd;      /* connected socket file descriptor */
} sock_rte_group_t;
/* Binds a command-line test name to its UCX API, command and test type,
 * plus a human-readable description printed by usage(). */
typedef struct test_type {
const char *name;               /* name given to the -t option */
ucx_perf_api_t api;             /* UCT (transport) or UCP (protocol) */
ucx_perf_cmd_t command;         /* operation to benchmark */
ucx_perf_test_type_t test_type; /* ping-pong or unidirectional stream */
const char *desc;               /* one-line description for usage() */
} test_type_t;
/* Aggregate state for one perftest run, filled in from the command line. */
struct perftest_context {
ucx_perf_params_t params;            /* benchmark parameters passed to libperf */
const char *server_addr;             /* server hostname; NULL when we are the server */
int port;                            /* TCP port for the out-of-band connection */
int mpi;                             /* nonzero when launched under MPI */
unsigned cpu;                        /* CPU to bind to (with TEST_FLAG_SET_AFFINITY) */
unsigned flags;                      /* TEST_FLAG_* bits */
unsigned num_batch_files;            /* number of -b batch files given */
char *batch_files[MAX_BATCH_FILES];  /* batch file paths */
char *test_names[MAX_BATCH_FILES];   /* current test name per batch file */
sock_rte_group_t sock_rte_group;     /* socket runtime-group state */
};
/*
 * Registry of available benchmarks: maps each -t name to its
 * (API, command, test type) triple and a description for usage().
 * The list is terminated by a NULL-name sentinel entry.
 */
test_type_t tests[] = {
{"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
"active message latency"},
{"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency"},
{"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
"atomic add latency"},
{"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate"},
{"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / rate"},
{"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / rate"},
{"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / rate"},
{"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"active message bandwidth / message rate"},
{"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth / message rate"},
{"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add message rate"},
{"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
"tag match latency"},
{"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag match bandwidth"},
{"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
"tag sync match latency"},
{"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
"tag sync match bandwidth"},
{"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
"put latency"},
{"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
"put bandwidth"},
{"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
"get latency / bandwidth / message rate"},
{"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic add bandwidth / message rate"},
{"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic fetch-and-add latency / bandwidth / rate"},
{"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic swap latency / bandwidth / rate"},
{"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
"atomic compare-and-swap latency / bandwidth / rate"},
{"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
"stream bandwidth"},
{"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
"stream latency"},
{NULL}
};
/*
 * Transfer exactly 'size' bytes over 'sock' using 'sock_call' (send or recv),
 * polling so the optional 'progress' callback keeps running while we wait.
 * Returns 0 on success, -1 on error or premature connection close.
 */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            ret = sock_call(sock, (char*)data + total, size - total, 0);
            if (ret < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            } else if (ret == 0) {
                /* recv() returns 0 when the peer performed an orderly
                 * shutdown; without this check 'total' never advances and
                 * the loop spins forever. */
                ucs_error("%s() returned 0: connection closed", name);
                return -1;
            }
            total += ret;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}
/* Send exactly 'size' bytes, driving 'progress' while waiting. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    /* send() takes a const buffer, so its type differs from the generic
     * callback signature sock_io() expects; cast it accordingly. */
    ssize_t (*send_func)(int, void *, size_t, int) =
        (ssize_t (*)(int, void *, size_t, int))send;

    return sock_io(sock, send_func, POLLOUT, data, size, progress, arg, "send");
}
/* Receive exactly 'size' bytes, driving 'progress' while waiting. */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    /* recv() already matches the callback signature; no cast is needed. */
    const char *op_name = "recv";

    return sock_io(sock, recv, POLLIN, data, size, progress, arg, op_name);
}
/* Print one results row in CSV, locale-numeric, or plain format. */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final)
{
    static const char *fmt_csv     = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
    static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
    static const char *fmt_plain   = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
    const char *fmt;
    unsigned idx;

    /* Nothing to print unless results were requested; in final-only mode,
     * intermediate reports are suppressed. */
    if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
        (!final && (flags & TEST_FLAG_PRINT_FINAL)))
    {
        return;
    }

    if (flags & TEST_FLAG_PRINT_CSV) {
        /* CSV rows are prefixed with the current test name columns. */
        for (idx = 0; idx < num_names; ++idx) {
            printf("%s,", test_names[idx]);
        }
        fmt = fmt_csv;
    } else if (flags & TEST_FLAG_NUMERIC_FMT) {
        fmt = fmt_numeric;
    } else {
        fmt = fmt_plain;
    }

    printf(fmt,
           (double)result->iters,
           result->latency.typical * 1000000.0,
           result->latency.moment_average * 1000000.0,
           result->latency.total_average * 1000000.0,
           result->bandwidth.moment_average / (1024.0 * 1024.0),
           result->bandwidth.total_average / (1024.0 * 1024.0),
           result->msgrate.moment_average,
           result->msgrate.total_average);
    fflush(stdout);
}
/* Print the test-description banner and the results-table (or CSV) header. */
static void print_header(struct perftest_context *ctx)
{
const char *test_api_str;
const char *test_data_str;
test_type_t *test;
unsigned i;
if (ctx->flags & TEST_FLAG_PRINT_TEST) {
/* Find the registry entry matching the selected command + test type;
 * the loop stops on the NULL-name sentinel if nothing matches. */
for (test = tests; test->name; ++test) {
if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
break;
}
}
if (test->name != NULL) {
if (test->api == UCX_PERF_API_UCT) {
test_api_str = "transport layer";
switch (ctx->params.uct.data_layout) {
case UCT_PERF_DATA_LAYOUT_SHORT:
test_data_str = "short";
break;
case UCT_PERF_DATA_LAYOUT_BCOPY:
test_data_str = "bcopy";
break;
case UCT_PERF_DATA_LAYOUT_ZCOPY:
test_data_str = "zcopy";
break;
default:
test_data_str = "(undefined)";
break;
}
} else if (test->api == UCX_PERF_API_UCP) {
test_api_str = "protocol layer";
test_data_str = "(automatic)"; /* TODO contig/stride/stream */
} else {
return;
}
printf("+------------------------------------------------------------------------------------------+\n");
printf("| API: %-60s |\n", test_api_str);
printf("| Test: %-60s |\n", test->desc);
printf("| Data layout: %-60s |\n", test_data_str);
printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params));
}
}
if (ctx->flags & TEST_FLAG_PRINT_CSV) {
if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
/* One column per batch file, then the fixed metric columns. */
for (i = 0; i < ctx->num_batch_files; ++i) {
printf("%s,", basename(ctx->batch_files[i]));
}
printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
}
} else {
if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
printf("| | latency (usec) | bandwidth (MB/s) | message rate (msg/s) |\n");
printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
printf("| # iterations | typical | average | overall | average | overall | average | overall |\n");
printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
} else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
printf("+------------------------------------------------------------------------------------------+\n");
}
}
}
/* Print the name(s) of the tests taken from the batch files, embedded in a
 * results-table separator row. No-op in CSV mode or when no batch files were
 * given.
 *
 * Fix: the original advanced 'pos' by the full strlen() of each name even
 * though the memcpy was clamped to the buffer, so long (or many) test names
 * could push 'pos' past the end of 'buf' and cause out-of-bounds writes on
 * the next iteration. 'pos' now advances only by the number of bytes
 * actually copied, and the '/' separator is bounds-checked as well. */
static void print_test_name(struct perftest_context *ctx)
{
    char buf[200];
    unsigned i, pos;
    size_t len;

    if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
        strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");

        pos = 1;
        for (i = 0; i < ctx->num_batch_files; ++i) {
            if ((i != 0) && (pos < sizeof(buf) - 1)) {
                buf[pos++] = '/';
            }
            /* overwrite the separator with the test name, clamped to buf */
            len = ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1);
            memcpy(&buf[pos], ctx->test_names[i], len);
            pos += len;
        }

        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("%s\n", buf);
        }
    }
}
/* Print the command-line help text for ucx_perftest.
 * Under MPI, only rank 0 prints (when running in MPI mode) so the help is
 * not duplicated once per rank. Current parameter defaults are interpolated
 * from 'ctx' so the help always reflects the configured values. */
static void usage(const struct perftest_context *ctx, const char *program)
{
    /* designated initializers map the API enum to a printable name */
    static const char* api_names[] = {
        [UCX_PERF_API_UCT] = "UCT",
        [UCX_PERF_API_UCP] = "UCP"
    };
    test_type_t *test;
    int UCS_V_UNUSED rank;  /* only read when HAVE_MPI */

#if HAVE_MPI
    /* suppress help on all ranks except 0 when running under MPI */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ctx->mpi && (rank != 0)) {
        return;
    }
#endif

#if HAVE_MPI
    printf("  Note: test can be also launched as an MPI application\n");
    printf("\n");
#elif HAVE_RTE
    printf("  Note: this test can be also launched as an libRTE application\n");
    printf("\n");
#endif
    printf("  Usage: %s [ server-hostname ] [ options ]\n", program);
    printf("\n");
    printf("  Common options:\n");
    printf("     -t <test>      test to run:\n");
    /* enumerate the available tests from the global table */
    for (test = tests; test->name; ++test) {
        printf("    %13s - %s %s\n", test->name,
               api_names[test->api], test->desc);
    }
    printf("\n");
    printf("     -s <size>      list of scatter-gather sizes for single message (%zu)\n",
           ctx->params.msg_size_list[0]);
    printf("                    for example: \"-s 16,48,8192,8192,14\"\n");
    printf("     -m <mem type>  memory type of messages\n");
    printf("                    host - system memory(default)\n");
    /* advertise CUDA memory types only when their allocators are present */
    if (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA] != NULL) {
        printf("                    cuda - NVIDIA GPU memory\n");
    }
    if (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA_MANAGED] != NULL) {
        printf("                    cuda-managed - NVIDIA cuda managed/unified memory\n");
    }
    printf("     -n <iters>     number of iterations to run (%ld)\n", ctx->params.max_iter);
    printf("     -w <iters>     number of warm-up iterations (%zu)\n",
           ctx->params.warmup_iter);
    printf("     -c <cpu>       set affinity to this CPU (off)\n");
    printf("     -O <count>     maximal number of uncompleted outstanding sends (%u)\n",
           ctx->params.max_outstanding);
    printf("     -i <offset>    distance between consecutive scatter-gather entries (%zu)\n",
           ctx->params.iov_stride);
    printf("     -T <threads>   number of threads in the test (%d), if >1 implies \"-M multi\"\n",
           ctx->params.thread_count);
    printf("     -B             register memory with NONBLOCK flag\n");
    printf("     -b <file>      read and execute tests from a batch file: every line in the\n");
    printf("                    file is a test to run, first word is test name, the rest of\n");
    printf("                    the line is command-line arguments for the test.\n");
    printf("     -p <port>      TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
    printf("     -P <0|1>       disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
    printf("     -h             show this help message\n");
    printf("\n");
    printf("  Output format:\n");
    printf("     -N             use numeric formatting (thousands separator)\n");
    printf("     -f             print only final numbers\n");
    printf("     -v             print CSV-formatted output\n");
    printf("\n");
    printf("  UCT only:\n");
    printf("     -d <device>    device to use for testing\n");
    printf("     -x <tl>        transport to use for testing\n");
    printf("     -D <layout>    data layout for sender side:\n");
    printf("                    short - short messages (default, cannot be used for get)\n");
    printf("                    bcopy - copy-out (cannot be used for atomics)\n");
    printf("                    zcopy - zero-copy (cannot be used for atomics)\n");
    printf("                    iov - scatter-gather list (iovec)\n");
    printf("     -W <count>     flow control window size, for active messages (%u)\n",
           ctx->params.uct.fc_window);
    printf("     -H <size>      active message header size (%zu)\n",
           ctx->params.am_hdr_size);
    printf("     -A <mode>      asynchronous progress mode (thread_spinlock)\n");
    printf("                    thread_spinlock - separate progress thread with spin locking\n");
    printf("                    thread_mutex - separate progress thread with mutex locking\n");
    printf("                    signal - signal-based timer\n");
    printf("\n");
    printf("  UCP only:\n");
    printf("     -M <thread>    thread support level for progress engine (single)\n");
    printf("                    single     - only the master thread can access\n");
    printf("                    serialized - one thread can access at a time\n");
    printf("                    multi      - multiple threads can access\n");
    printf("     -D <layout>[,<layout>]\n");
    printf("                    data layout for sender and receiver side (contig)\n");
    printf("                    contig - Continuous datatype\n");
    printf("                    iov    - Scatter-gather list\n");
    printf("     -C             use wild-card tag for tag tests\n");
    printf("     -U             force unexpected flow by using tag probe\n");
    printf("     -r <mode>      receive mode for stream tests (recv)\n");
    printf("                    recv       : Use ucp_stream_recv_nb\n");
    printf("                    recv_data  : Use ucp_stream_recv_data_nb\n");
    printf("\n");
    printf("   NOTE: When running UCP tests, transport and device should be specified by\n");
    printf("         environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
    printf("\n");
}
/* Map a datatype token to a UCP perf datatype.
 * Matching is by prefix so that "-D iov,contig" style arguments parse their
 * first component correctly. Returns UCS_ERR_INVALID_PARAM for any token
 * that starts with neither "iov" nor "contig". */
static ucs_status_t parse_ucp_datatype_params(const char *optarg,
                                              ucp_perf_datatype_t *datatype)
{
    if (0 == strncmp(optarg, "iov", strlen("iov"))) {
        *datatype = UCP_PERF_DATATYPE_IOV;
        return UCS_OK;
    }

    if (0 == strncmp(optarg, "contig", strlen("contig"))) {
        *datatype = UCP_PERF_DATATYPE_CONTIG;
        return UCS_OK;
    }

    return UCS_ERR_INVALID_PARAM;
}
/* Parse a comma-separated message-size list (e.g. "16,48,8192") into
 * params->msg_size_list / params->msg_size_cnt. The list is realloc'ed so
 * repeated -s options reuse the same buffer.
 *
 * Fixes: errno is now reset before *every* strtoul call (previously it was
 * reset once before the loop, so a stale value could make a valid token look
 * invalid), and msg_size_cnt is zeroed when the list is freed on error so
 * the count can never describe a NULL list.
 *
 * Returns UCS_OK, UCS_ERR_NO_MEMORY, or UCS_ERR_INVALID_PARAM. */
static ucs_status_t parse_message_sizes_params(const char *optarg,
                                               ucx_perf_params_t *params)
{
    const char delim = ',';
    size_t *msg_size_list, token_num, token_it;
    char *optarg_ptr, *optarg_ptr2;

    optarg_ptr = (char *)optarg;
    token_num  = 0;
    /* count the number of given message sizes (delimiters + 1) */
    while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
        ++optarg_ptr;
        ++token_num;
    }
    ++token_num;

    msg_size_list = realloc(params->msg_size_list,
                            sizeof(*params->msg_size_list) * token_num);
    if (NULL == msg_size_list) {
        return UCS_ERR_NO_MEMORY;
    }

    params->msg_size_list = msg_size_list;

    optarg_ptr = (char *)optarg;
    for (token_it = 0; token_it < token_num; ++token_it) {
        errno = 0; /* strtoul only sets errno on failure; clear stale values */
        params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
        if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) ||
            ((errno != 0) && (params->msg_size_list[token_it] == 0)) ||
            (optarg_ptr == optarg_ptr2)) {
            free(params->msg_size_list);
            params->msg_size_list = NULL; /* prevent double free */
            params->msg_size_cnt  = 0;    /* keep the count consistent */
            ucs_error("Invalid option substring argument at position %lu", token_it);
            return UCS_ERR_INVALID_PARAM;
        }
        optarg_ptr = optarg_ptr2 + 1;
    }

    params->msg_size_cnt = token_num;
    return UCS_OK;
}
/* Initialize 'params' with the built-in defaults used before command-line
 * parsing. Allocates the initial one-element message-size list (default
 * size 8); the caller owns and eventually frees msg_size_list.
 * Returns UCS_OK or UCS_ERR_NO_MEMORY. */
static ucs_status_t init_test_params(ucx_perf_params_t *params)
{
    memset(params, 0, sizeof(*params));
    /* _LAST sentinels mean "not selected yet"; -t must set them */
    params->api             = UCX_PERF_API_LAST;
    params->command         = UCX_PERF_CMD_LAST;
    params->test_type       = UCX_PERF_TEST_TYPE_LAST;
    params->thread_mode     = UCS_THREAD_MODE_SINGLE;
    params->thread_count    = 1;
    params->async_mode      = UCS_ASYNC_THREAD_LOCK_TYPE;
    params->wait_mode       = UCX_PERF_WAIT_MODE_LAST;
    params->max_outstanding = 1;
    params->warmup_iter     = 10000;
    params->am_hdr_size     = 8;
    params->alignment       = ucs_get_page_size();
    params->max_iter        = 1000000l;
    params->max_time        = 0.0;
    params->report_interval = 1.0;
    params->flags           = UCX_PERF_TEST_FLAG_VERBOSE;
    params->uct.fc_window   = UCT_PERF_TEST_MAX_FC_WINDOW;
    params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
    params->mem_type        = UCS_MEMORY_TYPE_HOST;
    params->msg_size_cnt    = 1;
    params->iov_stride      = 0;
    params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
    /* "none" placeholders; -d/-x overwrite these for UCT tests */
    strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE);
    strcpy(params->uct.tl_name,  TL_RESOURCE_NAME_NONE);

    /* default message size list: a single 8-byte message */
    params->msg_size_list = calloc(params->msg_size_cnt,
                                   sizeof(*params->msg_size_list));
    if (params->msg_size_list == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    params->msg_size_list[0] = 8;
    return UCS_OK;
}
/* Parse one test-related command-line option (the TEST_PARAMS_ARGS set)
 * into 'params'. Shared between the main command line and batch-file lines.
 *
 * Fix: restores '&params->ucp.send_datatype' / '&params->ucp.recv_datatype'
 * in the -D case, which had been corrupted to the mojibake token
 * "¶ms->..." (a mangled '&para;' HTML entity) and did not compile.
 *
 * Returns UCS_OK on success or UCS_ERR_INVALID_PARAM on a bad option or
 * argument. */
static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg)
{
    test_type_t *test;
    char *optarg2 = NULL;

    switch (opt) {
    case 'd':
        ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name),
                          "%s", optarg);
        return UCS_OK;
    case 'x':
        ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name),
                          "%s", optarg);
        return UCS_OK;
    case 't':
        /* look the test up by name in the global table */
        for (test = tests; test->name; ++test) {
            if (!strcmp(optarg, test->name)) {
                params->api       = test->api;
                params->command   = test->command;
                params->test_type = test->test_type;
                break;
            }
        }
        if (test->name == NULL) {
            ucs_error("Invalid option argument for -t");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'D':
        /* UCT layouts first; otherwise try UCP "<send>[,<recv>]" datatypes */
        if (!strcmp(optarg, "short")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
        } else if (!strcmp(optarg, "bcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY;
        } else if (!strcmp(optarg, "zcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY;
        } else if (UCS_OK == parse_ucp_datatype_params(optarg,
                                                       &params->ucp.send_datatype)) {
            optarg2 = strchr(optarg, ',');
            if (optarg2) {
                if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1,
                                                        &params->ucp.recv_datatype)) {
                    return UCS_ERR_INVALID_PARAM;
                }
            }
        } else {
            ucs_error("Invalid option argument for -D");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'i':
        params->iov_stride = atol(optarg);
        return UCS_OK;
    case 'n':
        params->max_iter = atol(optarg);
        return UCS_OK;
    case 's':
        return parse_message_sizes_params(optarg, params);
    case 'H':
        params->am_hdr_size = atol(optarg);
        return UCS_OK;
    case 'W':
        params->uct.fc_window = atoi(optarg);
        return UCS_OK;
    case 'O':
        params->max_outstanding = atoi(optarg);
        return UCS_OK;
    case 'w':
        params->warmup_iter = atol(optarg);
        return UCS_OK;
    case 'o':
        params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED;
        return UCS_OK;
    case 'B':
        params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK;
        return UCS_OK;
    case 'q':
        params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE;
        return UCS_OK;
    case 'C':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD;
        return UCS_OK;
    case 'U':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE;
        return UCS_OK;
    case 'M':
        if (!strcmp(optarg, "single")) {
            params->thread_mode = UCS_THREAD_MODE_SINGLE;
            return UCS_OK;
        } else if (!strcmp(optarg, "serialized")) {
            params->thread_mode = UCS_THREAD_MODE_SERIALIZED;
            return UCS_OK;
        } else if (!strcmp(optarg, "multi")) {
            params->thread_mode = UCS_THREAD_MODE_MULTI;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -M");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'T':
        /* multiple threads imply the MULTI thread mode */
        params->thread_count = atoi(optarg);
        params->thread_mode  = UCS_THREAD_MODE_MULTI;
        return UCS_OK;
    case 'A':
        /* "thread" kept as a legacy alias for "thread_spinlock" */
        if (!strcmp(optarg, "thread") || !strcmp(optarg, "thread_spinlock")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK;
            return UCS_OK;
        } else if (!strcmp(optarg, "thread_mutex")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_MUTEX;
            return UCS_OK;
        } else if (!strcmp(optarg, "signal")) {
            params->async_mode = UCS_ASYNC_MODE_SIGNAL;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -A");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'r':
        if (!strcmp(optarg, "recv_data")) {
            params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        } else if (!strcmp(optarg, "recv")) {
            params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        }
        return UCS_ERR_INVALID_PARAM;
    case 'm':
        /* non-host memory types are accepted only when an allocator exists */
        if (!strcmp(optarg, "host")) {
            params->mem_type = UCS_MEMORY_TYPE_HOST;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda") &&
                   (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA] != NULL)) {
            params->mem_type = UCS_MEMORY_TYPE_CUDA;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda-managed") &&
                   (ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_CUDA_MANAGED] != NULL)) {
            params->mem_type = UCS_MEMORY_TYPE_CUDA_MANAGED;
            return UCS_OK;
        }
        ucs_error("Unsupported memory type: \"%s\"", optarg);
        return UCS_ERR_INVALID_PARAM;
    default:
        return UCS_ERR_INVALID_PARAM;
    }
}
/* Read the next non-empty, non-comment ('#') line from a batch file and
 * parse it as "<test-name> [options...]", applying the options to 'params'.
 * On success, *test_name_p is a strdup'ed copy of the test name which the
 * caller must free. Returns UCS_ERR_NO_ELEM at end of file.
 *
 * Fix: the strdup() result is now checked; previously an allocation failure
 * silently returned UCS_OK with *test_name_p == NULL. */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, ucx_perf_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM;
        }
        ++(*line_num);

        /* tokenize the line in-place into an argv[] for getopt() */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#')); /* skip blanks and comments */

    optind = 1; /* restart getopt() for this synthetic argv */
    while ((c = getopt(argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("in batch file '%s' line %d: -%c %s: %s",
                      file_name, *line_num, c, optarg, ucs_status_string(status));
            return status;
        }
    }

    *test_name_p = strdup(argv[0]);
    if (*test_name_p == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    return UCS_OK;
}
/* Parse the full command line into 'ctx': perftest-level options are handled
 * here, test-level options are delegated to parse_test_params(). A trailing
 * non-option argument, if present, is taken as the server hostname (client
 * mode). Returns UCS_ERR_CANCELED after printing help for -h. */
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
                               int argc, char **argv)
{
    ucs_status_t status;
    int c;

    ucs_trace_func("");

    ucx_perf_global_init(); /* initialize memory types */

    status = init_test_params(&ctx->params);
    if (status != UCS_OK) {
        return status;
    }

    ctx->server_addr     = NULL;
    ctx->num_batch_files = 0;
    ctx->port            = 13337;
    ctx->flags           = 0;
    ctx->mpi             = mpi_initialized;

    optind = 1;
    while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
        switch (c) {
        case 'p':
            ctx->port = atoi(optarg);
            break;
        case 'b':
            /* batch files stack; each -b adds one nesting level */
            if (ctx->num_batch_files < MAX_BATCH_FILES) {
                ctx->batch_files[ctx->num_batch_files++] = optarg;
            }
            break;
        case 'N':
            ctx->flags |= TEST_FLAG_NUMERIC_FMT;
            break;
        case 'f':
            ctx->flags |= TEST_FLAG_PRINT_FINAL;
            break;
        case 'v':
            ctx->flags |= TEST_FLAG_PRINT_CSV;
            break;
        case 'c':
            ctx->flags |= TEST_FLAG_SET_AFFINITY;
            ctx->cpu = atoi(optarg);
            break;
        case 'P':
#if HAVE_MPI
            ctx->mpi = atoi(optarg) && mpi_initialized;
            break;
#endif
            /* without MPI support, -P deliberately falls through to usage */
        case 'h':
            usage(ctx, ucs_basename(argv[0]));
            return UCS_ERR_CANCELED;
        default:
            /* everything else is a test parameter (TEST_PARAMS_ARGS) */
            status = parse_test_params(&ctx->params, c, optarg);
            if (status != UCS_OK) {
                usage(ctx, ucs_basename(argv[0]));
                return status;
            }
            break;
        }
    }

    if (optind < argc) {
        ctx->server_addr = argv[optind];
    }

    return UCS_OK;
}
/* The socket-based RTE always connects exactly one server and one client,
 * so the group size is a constant two. */
static unsigned sock_rte_group_size(void *rte_group)
{
    (void)rte_group; /* unused: size does not depend on the group */
    return 2;
}
/* Index of this process in the two-member socket RTE group:
 * the server is rank 0, the client rank 1. */
static unsigned sock_rte_group_index(void *rte_group)
{
    sock_rte_group_t *group = rte_group;

    if (group->is_server) {
        return 0;
    }
    return 1;
}
/* Barrier over the out-of-band TCP socket: both sides exchange a magic word,
 * driving the user 'progress' callback while blocked in send/recv. Only the
 * OpenMP master thread touches the socket; the surrounding omp barriers
 * fence the remaining threads on entry and exit. */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp barrier

#pragma omp master
  {
    sock_rte_group_t *group = rte_group;
    const unsigned magic = 0xdeadbeef; /* sanity marker for the handshake */
    unsigned sync;

    sync = magic;
    safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);

    sync = 0;
    safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);

    /* a mismatch means the OOB streams lost sync */
    ucs_assert(sync == magic);
  }
#pragma omp barrier
}
/* Post an iovec to the peer over the OOB socket: the total payload size is
 * sent first, followed by each fragment in order. 'req' is unused — the
 * socket transport completes synchronously. */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t total = 0;
    int idx;

    for (idx = 0; idx < iovcnt; ++idx) {
        total += iovec[idx].iov_len;
    }
    safe_send(group->connfd, &total, sizeof(total), NULL, NULL);

    for (idx = 0; idx < iovcnt; ++idx) {
        safe_send(group->connfd, iovec[idx].iov_base, iovec[idx].iov_len, NULL,
                  NULL);
    }
}
/* Receive a vector previously posted by the peer with sock_rte_post_vec():
 * first the total size, then the payload. Receiving from our own index is a
 * no-op (loopback is not transported over the socket). */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    int group_index;
    size_t size;

    group_index = sock_rte_group_index(rte_group);
    if (src == group_index) {
        return;
    }

    /* with exactly two members, the only other rank is (1 - our index) */
    ucs_assert_always(src == (1 - group_index));
    safe_recv(group->connfd, &size, sizeof(size), NULL, NULL);
    /* the sender's announced size must fit the caller's buffer */
    ucs_assert_always(size <= max);
    safe_recv(group->connfd, buffer, size, NULL, NULL);
}
/* RTE report callback: forward intermediate/final results to the shared
 * progress printer. 'arg' is the perftest_context set as report_arg. */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}
/* RTE callback table for the TCP socket out-of-band channel.
 * exchange_vec is a no-op because post_vec sends data eagerly. */
static ucx_perf_rte_t sock_rte = {
    .group_size    = sock_rte_group_size,
    .group_index   = sock_rte_group_index,
    .barrier       = sock_rte_barrier,
    .post_vec      = sock_rte_post_vec,
    .recv          = sock_rte_recv,
    .exchange_vec  = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function,
    .report        = sock_rte_report,
};
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
struct sockaddr_in inaddr;
struct hostent *he;
ucs_status_t status;
int optval = 1;
int sockfd, connfd;
int ret;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
ucs_error("socket() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err;
}
if (ctx->server_addr == NULL) {
optval = 1;
status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
&optval, sizeof(optval));
if (status != UCS_OK) {
goto err_close_sockfd;
}
inaddr.sin_family = AF_INET;
inaddr.sin_port = htons(ctx->port);
inaddr.sin_addr.s_addr = INADDR_ANY;
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("bind() failed: %m");
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
ret = listen(sockfd, 10);
if (ret < 0) {
ucs_error("listen() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
printf("Waiting for connection...\n");
/* Accept next connection */
connfd = accept(sockfd, NULL, NULL);
if (connfd < 0) {
ucs_error("accept() failed: %m");
status = UCS_ERR_IO_ERROR;
goto err_close_sockfd;
}
close(sockfd);
ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
if (ctx->params.msg_size_cnt) {
ctx->params.msg_size_list = calloc(ctx->params.msg_size_cnt,
sizeof(*ctx->params.msg_size_list));
if (NULL == ctx->params.msg_size_list) {
status = UCS_ERR_NO_MEMORY;
goto err_close_connfd;
}
ret = safe_recv(connfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) *
ctx->params.msg_size_cnt,
NULL, NULL);
if (ret) {
status = UCS_ERR_IO_ERROR;
goto err_close_connfd;
}
}
ctx->sock_rte_group.connfd = connfd;
ctx->sock_rte_group.is_server = 1;
} else {
he = gethostbyname(ctx->server_addr);
if (he == NULL || he->h_addr_list == NULL) {
ucs_error("host %s not found: %s", ctx->server_addr,
hstrerror(h_errno));
status = UCS_ERR_INVALID_ADDR;
goto err_close_sockfd;
}
inaddr.sin_family = he->h_addrtype;
inaddr.sin_port = htons(ctx->port);
ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
if (ret < 0) {
ucs_error("connect() failed: %m");
status = UCS_ERR_UNREACHABLE;
goto err_close_sockfd;
}
safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
if (ctx->params.msg_size_cnt) {
safe_send(sockfd, ctx->params.msg_size_list,
sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
NULL, NULL);
}
ctx->sock_rte_group.connfd = sockfd;
ctx->sock_rte_group.is_server = 0;
}
if (ctx->sock_rte_group.is_server) {
ctx->flags |= TEST_FLAG_PRINT_TEST;
} else {
ctx->flags |= TEST_FLAG_PRINT_RESULTS;
}
ctx->params.rte_group = &ctx->sock_rte_group;
ctx->params.rte = &sock_rte;
ctx->params.report_arg = ctx;
return UCS_OK;
err_close_connfd:
close(connfd);
goto err;
err_close_sockfd:
close(sockfd);
err:
return status;
}
/* Tear down the socket RTE: close the out-of-band connection.
 * Always reports success. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    close(ctx->sock_rte_group.connfd);
    return UCS_OK;
}
#if HAVE_MPI
/* Group size under MPI: the number of ranks in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int nranks;

    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    return nranks;
}
/* This process's index under MPI: its rank in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_index(void *rte_group)
{
    int my_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    return my_rank;
}
/* Barrier over MPI which keeps calling the user 'progress' callback while
 * waiting. Implemented as a naive ping/pong over MPI_Send/MPI_Irecv rather
 * than MPI_Ibarrier, to stay compatible with MPI-1.
 *
 * Fix: the "#pragma omp master" region is now a braced block. Without
 * braces the pragma applied only to the single following statement, so
 * under OpenMP every thread executed the rest of the MPI handshake —
 * unlike sock_rte_barrier, which correctly braces its master section. */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp barrier

#pragma omp master
    {
        int group_size, my_rank, i;
        MPI_Request *reqs;
        int nreqs = 0;
        int dummy;
        int flag;

        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &group_size);

        /* allocate maximal possible number of requests */
        reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

        if (my_rank == 0) {
            /* root gathers "ping" from all other ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Irecv(&dummy, 0, MPI_INT,
                          i /* source */,
                          1 /* tag */,
                          MPI_COMM_WORLD,
                          &reqs[nreqs++]);
            }
        } else {
            /* every non-root rank sends "ping" and waits for "pong" */
            MPI_Send(&dummy, 0, MPI_INT,
                     0 /* dest */,
                     1 /* tag */,
                     MPI_COMM_WORLD);
            MPI_Irecv(&dummy, 0, MPI_INT,
                      0 /* source */,
                      2 /* tag */,
                      MPI_COMM_WORLD,
                      &reqs[nreqs++]);
        }

        /* wait for the receives while driving user progress */
        do {
            MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            progress(arg);
        } while (!flag);

        if (my_rank == 0) {
            /* root sends "pong" to all ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Send(&dummy, 0, MPI_INT,
                         i /* dest */,
                         2 /* tag */,
                         MPI_COMM_WORLD);
            }
        }
    }
#pragma omp barrier
}
/* Broadcast an iovec to every other rank over MPI. The last fragment is
 * sent with tag 1 so the receiver knows when the vector is complete.
 * The request handle is a dummy non-NULL token (MPI sends are eager here).
 * NOTE(review): iov_len (size_t) is passed as the MPI 'int' count —
 * fragments larger than INT_MAX would truncate; presumably sizes are small
 * control data, but confirm against callers. */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int group_size;
    int my_rank;
    int dest, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    for (dest = 0; dest < group_size; ++dest) {
        if (dest == my_rank) {
            continue; /* no self-send */
        }

        for (i = 0; i < iovcnt; ++i) {
            MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
                     i == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    *req = (void*)(uintptr_t)1;
}
/* Receive the fragments posted by mpi_rte_post_vec() from rank 'src',
 * appending into 'buffer' until the fragment tagged 1 (the last one)
 * arrives. A receive from our own rank is a no-op.
 * NOTE(review): 'buffer + offset' relies on the GNU void*-arithmetic
 * extension, and 'src == my_rank' compares unsigned with int — both fine
 * under the project's GCC-based build but not strictly portable. */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
                         void *req)
{
    MPI_Status status;
    size_t offset;
    int my_rank;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (src == my_rank) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max); /* sender must fit our buffer */
        MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG,
                 MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &count);
        offset += count;
    } while (status.MPI_TAG != 1); /* tag 1 marks the final fragment */
}
/* RTE report callback (MPI flavor): forward results to the shared progress
 * printer. 'arg' is the perftest_context set as report_arg. */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}
/* RTE callback table for the MPI transport. exchange_vec is a no-op
 * (post_vec sends eagerly); the cast silences the signature mismatch of
 * ucs_empty_function. */
static ucx_perf_rte_t mpi_rte = {
    .group_size    = mpi_rte_group_size,
    .group_index   = mpi_rte_group_index,
    .barrier       = mpi_rte_barrier,
    .post_vec      = mpi_rte_post_vec,
    .recv          = mpi_rte_recv,
    .exchange_vec  = (void*)ucs_empty_function,
    .report        = mpi_rte_report,
};
#elif HAVE_RTE
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
/* Barrier under libRTE. The progress callback is unused — rte_barrier()
 * blocks internally. Only the OpenMP master thread enters the RTE call;
 * the surrounding omp barriers fence the other threads. */
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp barrier

#pragma omp master
  {
    rte_group_t group = (rte_group_t)rte_group;
    int rc;

    rc = rte_barrier(group);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_barrier");
    }
  }
#pragma omp barrier
}
/* Stage an iovec for exchange via an RTE "SRS" session: the data is
 * registered under key "KEY_PERF" and the session handle is returned in
 * *req for a later ext_rte_exchange_vec()/ext_rte_recv().
 * NOTE(review): on calloc() failure this returns with *req unset and the
 * just-created session leaked — callers presumably treat *req as valid;
 * confirm before relying on this path. */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;

    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_create");
    }

    /* translate struct iovec entries into RTE's typed iovec format */
    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        return;
    }
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type     = rte_datatype_uint8_t;
        r_vec[i].count    = iovec[i].iov_len;
    }
    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }
    *req = session;
    free(r_vec); /* RTE copies/records the descriptors; temp array not needed */
}
/* Receive the data published by rank 'src' under "KEY_PERF" through the SRS
 * session carried in 'req', unpack it into 'buffer' (at most 'max' bytes as
 * a uint8 vector), then destroy the session and release the RTE-allocated
 * staging buffer. */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group         = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;

    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return;
    }

    /* unpack the staged bytes into the caller's buffer */
    r_vec.iov_base = buffer;
    r_vec.type     = rte_datatype_uint8_t;
    r_vec.count    = max;

    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);

    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    free(rte_buffer);
}
/* Exchange the data staged by ext_rte_post_vec(): 'req' is the SRS session
 * handle; rte_srs_exchange_data() performs the collective exchange. */
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
    rte_srs_session_t session = (rte_srs_session_t)req;
    int rc;

    rc = rte_srs_exchange_data(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_exchange_data");
    }
}
/* RTE report callback (libRTE flavor): forward results to the shared
 * progress printer. 'arg' is the perftest_context set as report_arg. */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *ctx = arg;
    print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags,
                   is_final);
}
/* RTE callback table for the external libRTE transport. Unlike the socket
 * and MPI flavors, exchange_vec is a real operation here (SRS exchange). */
static ucx_perf_rte_t ext_rte = {
    .group_size    = ext_rte_group_size,
    .group_index   = ext_rte_group_index,
    .barrier       = ext_rte_barrier,
    .report        = ext_rte_report,
    .post_vec      = ext_rte_post_vec,
    .recv          = ext_rte_recv,
    .exchange_vec  = ext_rte_exchange_vec,
};
#endif
/* Wire the context to the MPI (or libRTE) runtime environment.
 * Under MPI: requires exactly two ranks; rank 1 prints the results.
 * Under libRTE: joins the RTE group; rank 1 prints the results.
 * Compiled to a no-op setup when neither is available (callers only reach
 * this when mpi_rte was selected). */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
    ucs_trace_func("");

#if HAVE_MPI
    int size, rank;

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2) {
        ucs_error("This test should run with exactly 2 processes (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }

    /* Let the last rank print the results */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group  = NULL; /* mpi_rte callbacks use MPI_COMM_WORLD */
    ctx->params.rte        = &mpi_rte;
    ctx->params.report_arg = ctx;
#elif HAVE_RTE
    rte_group_t group;

    rte_init(NULL, NULL, &group);
    /* Let the last rank print the results */
    if (1 == rte_group_rank(group)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }
    ctx->params.rte_group  = group;
    ctx->params.rte        = &ext_rte;
    ctx->params.report_arg = ctx;
#endif
    return UCS_OK;
}
/* Tear down the MPI/libRTE runtime wiring. MPI_Finalize() is handled by
 * main(); only libRTE needs an explicit finalize here. */
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#if HAVE_RTE
    rte_finalize();
#endif
    return UCS_OK;
}
/* Validate/apply CPU affinity for the benchmark process.
 * With -c: pin the process to the requested CPU (after range-checking it).
 * Without -c: inspect the inherited affinity mask and warn when the process
 * is effectively unpinned (bound to more than 2 CPUs), since that can skew
 * latency numbers.
 *
 * Fix: corrected the typo "ot of range" -> "out of range" in the error
 * message. */
static ucs_status_t check_system(struct perftest_context *ctx)
{
    cpu_set_t cpuset;
    unsigned i, count, nr_cpus;
    int ret;

    ucs_trace_func("");

    ret = sysconf(_SC_NPROCESSORS_CONF);
    if (ret < 0) {
        ucs_error("failed to get local cpu count: %m");
        return UCS_ERR_INVALID_PARAM;
    }
    nr_cpus = ret;

    memset(&cpuset, 0, sizeof(cpuset));
    if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
        if (ctx->cpu >= nr_cpus) {
            ucs_error("cpu (%u) out of range (0..%u)", ctx->cpu, nr_cpus - 1);
            return UCS_ERR_INVALID_PARAM;
        }
        CPU_SET(ctx->cpu, &cpuset);

        ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_setaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }
    } else {
        ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_getaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }

        /* count how many CPUs the inherited mask allows */
        count = 0;
        for (i = 0; i < CPU_SETSIZE; ++i) {
            if (CPU_ISSET(i, &cpuset)) {
                ++count;
            }
        }
        if (count > 2) {
            ucs_warn("CPU affinity is not set (bound to %u cpus)."
                     " Performance may be impacted.", count);
        }
    }

    return UCS_OK;
}
/* Deep-copy 'src' into 'dest': flat fields are copied by struct assignment
 * and the message-size list is duplicated, so each copy owns (and later
 * frees) its own buffer. Returns UCS_ERR_NO_MEMORY only when a non-empty
 * list could not be allocated (malloc(0) may legally return NULL). */
static ucs_status_t clone_params(ucx_perf_params_t *dest,
                                 const ucx_perf_params_t *src)
{
    size_t list_bytes;

    *dest      = *src;
    list_bytes = src->msg_size_cnt * sizeof(*src->msg_size_list);

    dest->msg_size_list = malloc(list_bytes);
    if (dest->msg_size_list != NULL) {
        memcpy(dest->msg_size_list, src->msg_size_list, list_bytes);
        return UCS_OK;
    }

    return (list_bytes != 0) ? UCS_ERR_NO_MEMORY : UCS_OK;
}
/* Run the test tree rooted at 'parent_params'. 'depth' selects which batch
 * file (-b) supplies the next level of parameter overrides: each line of
 * that file clones the parent parameters, applies its options, and recurses.
 * At the deepest level the accumulated parameters are actually executed.
 *
 * Fix: restores the '&params' argument in the clone_params() and
 * run_test_recurs() calls, which had been corrupted to the mojibake token
 * "¶ms" (a mangled '&para;' HTML entity) and did not compile. */
static ucs_status_t run_test_recurs(struct perftest_context *ctx,
                                    ucx_perf_params_t *parent_params,
                                    unsigned depth)
{
    ucx_perf_params_t params;
    ucx_perf_result_t result;
    ucs_status_t status;
    FILE *batch_file;
    int line_num;

    ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files);

    if (parent_params->api == UCX_PERF_API_UCP) {
        /* UCT-only options are meaningless for UCP tests; warn, don't fail */
        if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.dev_name);
        }
        if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.tl_name);
        }
    }

    if (depth >= ctx->num_batch_files) {
        /* leaf level - run the benchmark with the accumulated parameters */
        print_test_name(ctx);
        return ucx_perf_run(parent_params, &result);
    }

    batch_file = fopen(ctx->batch_files[depth], "r");
    if (batch_file == NULL) {
        ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]);
        return UCS_ERR_IO_ERROR;
    }

    status = clone_params(&params, parent_params);
    if (status != UCS_OK) {
        goto out;
    }

    line_num = 0;
    while ((status = read_batch_file(batch_file, ctx->batch_files[depth],
                                     &line_num, &params,
                                     &ctx->test_names[depth])) == UCS_OK) {
        run_test_recurs(ctx, &params, depth + 1);
        free(params.msg_size_list);
        free(ctx->test_names[depth]);
        ctx->test_names[depth] = NULL;

        /* fresh clone for the next batch line */
        status = clone_params(&params, parent_params);
        if (status != UCS_OK) {
            goto out;
        }
    }

    if (status == UCS_ERR_NO_ELEM) {
        status = UCS_OK; /* clean end-of-file, not an error */
    }
    free(params.msg_size_list);

out:
    fclose(batch_file);
    return status;
}
/* Top-level test driver: prints the banner/header and starts the (possibly
 * batch-recursive) run with the command-line parameters as the root. */
static ucs_status_t run_test(struct perftest_context *ctx)
{
    ucs_status_t status;

    ucs_trace_func("");

    /* en_US locale enables the thousands separator used by -N formatting */
    setlocale(LC_ALL, "en_US");

    print_header(ctx);

    status = run_test_recurs(ctx, &ctx->params, 0);
    if (status != UCS_OK) {
        ucs_error("Failed to run test: %s", ucs_status_string(status));
    }

    return status;
}
/* ucx_perftest entry point: initialize MPI if available, parse options,
 * select the runtime environment (MPI/RTE vs. plain TCP sockets), run the
 * test(s), and clean up. Returns 0 on success (including -h), non-zero on
 * any failure. */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int mpi_rte;
    int ret;

#if HAVE_MPI
    /* Don't try MPI when stdin is a terminal (interactive run) */
    mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
    mpi_initialized = 0;
#endif

    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        /* -h returns CANCELED, which is a successful exit */
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        goto out;
    }

#ifdef __COVERITY__
    /* coverity[dont_call] */
    mpi_rte = rand(); /* Shut up deadcode error */
#endif

    /* choose the out-of-band runtime: MPI/RTE when available, else sockets */
    if (ctx.mpi) {
        mpi_rte = 1;
    } else {
#if HAVE_RTE
        mpi_rte = 1;
#else
        mpi_rte = 0;
#endif
    }

    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Create RTE */
    status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }

    ret = 0;

out_cleanup_rte:
    (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
    if (ctx.params.msg_size_list) {
        free(ctx.params.msg_size_list);
    }
    if (mpi_initialized) {
#if HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
|
dftcommon.c | // Copyright Naoki Shibata 2010 - 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <inttypes.h>
#include <assert.h>
#if defined(POWER64_UNDEF_USE_EXTERN_INLINES)
// This is a workaround required to cross compile for PPC64 binaries
#include <features.h>
#ifdef __USE_EXTERN_INLINES
#undef __USE_EXTERN_INLINES
#endif
#endif
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "sleef.h"
#define IMPORT_IS_EXPORT
#include "sleefdft.h"
#include "dispatchparam.h"
#include "dftcommon.h"
#include "common.h"
#include "arraymap.h"
#define MAGIC_FLOAT 0x31415926
#define MAGIC_DOUBLE 0x27182818
#define MAGIC_LONGDOUBLE 0x14142135
#define MAGIC_QUAD 0x33166247
#define MAGIC2D_FLOAT 0x22360679
#define MAGIC2D_DOUBLE 0x17320508
#define MAGIC2D_LONGDOUBLE 0x26457513
#define MAGIC2D_QUAD 0x36055512
/* Printable names for the four execution configurations (single/multi
 * thread, with or without streaming stores); indices match the 'config'
 * values stored per path step. */
const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" };

/* Parse a transform-path string such as "4(MT) 4 2" into parallel arrays of
 * butterfly widths ('path') and configuration indices ('config').
 * Each token is a decimal width, optionally followed by a parenthesized
 * configuration name from configStr. Returns the number of steps parsed, or
 * a negative error code:
 *   -1 unexpected character, -2 too many steps, -3 unknown config name,
 *   -4 missing ')', -5 widths don't sum to log2len, -6 width > MAXBUTWIDTH. */
static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) {
  int pathLen = 0, l2l = 0;

  for(;;) {
    while(*p == ' ') p++;
    if (*p == '\0') break;
    if (!isdigit(*p)) return -1;

    pathLen++;
    if (pathLen >= pathLenMax) return -2;

    /* parse the decimal butterfly width */
    int n = 0;
    while(isdigit(*p)) n = n * 10 + *p++ - '0';

    if (n > MAXBUTWIDTH) return -6;

    path[pathLen-1] = n;
    l2l += n;

    config[pathLen-1] = 0; /* default configuration when none is given */

    if (*p != '(') continue;

    /* match the longest config name first (indices 3..0) since "ST" is a
     * prefix of "ST stream" */
    int c;
    for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break;
    if (c == -1) return -3;
    p += strlen(configStr[c]) + 1;
    if (*p != ')') return -4;
    p++;

    config[pathLen-1] = c;
  }

  /* the widths must decompose the full transform exactly */
  if (l2l != log2len) return -5;

  return pathLen;
}
/* Override the plan's transform path with a user-supplied string (see
 * parsePathStr for the format). On parse failure the current path is left
 * untouched and, in verbose mode, the error code is printed.
 * bestPath is indexed by the remaining log2 length at each step; entry 0 is
 * unused and entries are filled top-down from log2len. */
EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));

  int path[32], config[32];
  int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len);

  if (pathLen < 0) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr);
    return;
  }

  /* clear the old path, then record each step at its remaining-level slot */
  for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0;

  for(int level = p->log2len, j=0;level > 0 && j < pathLen;) {
    p->bestPath[level] = path[j];
    p->bestPathConfig[level] = config[j];
    level -= path[j];
    j++;
  }

  /* recount the steps actually stored */
  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Set path : ");
    for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }
}
// Release every twiddle table p->tbl[width][level] together with the
// per-width pointer arrays, and NULL the table slots afterwards.
void freeTables(SleefDFT *p) {
  for (int width = 1; width <= MAXBUTWIDTH; width++) {
    for (uint32_t lv = width; lv <= p->log2len; lv++) {
      Sleef_free(p->tbl[width][lv]);
    }
    free(p->tbl[width]);
    p->tbl[width] = NULL;
  }
}
// Free all resources owned by a SleefDFT plan.  Handles both 2D plans
// (MAGIC2D_*: frees the transpose buffer and the nested 1D plans) and 1D
// plans (MAGIC_*: frees real-transform coefficients, permutation tables
// and twiddle tables).  Clearing magic makes use-after-dispose trip the
// asserts on any later API call.
EXPORT void SleefDFT_dispose(SleefDFT *p) {
if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)) {
Sleef_free(p->tBuf);
SleefDFT_dispose(p->instH);
// When the 2D transform is square, instV aliases instH; dispose it only
// once to avoid a double free.
if (p->hlen != p->vlen) SleefDFT_dispose(p->instV);
p->magic = 0;
free(p);
return;
}
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
// Tiny transforms (log2len <= 1) never allocate the tables freed below.
if (p->log2len <= 1) {
p->magic = 0;
free(p);
return;
}
// Real-transform coefficient tables exist only for SLEEF_MODE_REAL plans.
if ((p->mode & SLEEF_MODE_REAL) != 0) {
Sleef_free(p->rtCoef1);
Sleef_free(p->rtCoef0);
p->rtCoef0 = p->rtCoef1 = NULL;
}
for(int level = p->log2len;level >= 1;level--) {
Sleef_free(p->perm[level]);
}
free(p->perm);
p->perm = NULL;
freeTables(p);
p->magic = 0;
free(p);
}
// Floor of log2(q).  For q == 0 this yields (uint32_t)-1, matching the
// table-driven original (whose final "- 1" underflows for q == 0).
uint32_t ilog2(uint32_t q) {
  uint32_t pos = (uint32_t)-1;
  while (q != 0) {
    pos++;
    q >>= 1;
  }
  return pos;
}
//
char *dftPlanFilePath = NULL;
char *archID = NULL;
uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR;
ArrayMap *planMap = NULL;
int planFilePathSet = 0, planFileLoaded = 0;
#ifdef _OPENMP
omp_lock_t planMapLock;
int planMapLockInitialized = 0;
#endif
// Lazily initialize the OpenMP lock guarding the global planMap.
// The critical section serializes the check-and-init, so two threads
// racing on the first call cannot both run omp_init_lock().
// Without OpenMP this is a no-op (no lock is needed or used).
static void initPlanMapLock() {
#ifdef _OPENMP
#pragma omp critical
{
if (!planMapLockInitialized) {
planMapLockInitialized = 1;
omp_init_lock(&planMapLock);
}
}
#endif
}
static void planMap_clear() {
if (planMap != NULL) ArrayMap_dispose(planMap);
planMap = NULL;
}
// Configure where measurement plans are loaded from / saved to.
//   path : plan file path, or NULL to disable file-backed plans.
//   arch : CPU identifier string; NULL selects Sleef_getCpuIdString().
//   mode : SLEEF_PLAN_* flags; SLEEF_PLAN_RESET drops any loaded plan.
EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) {
  initPlanMapLock();
  if ((mode & SLEEF_PLAN_RESET) != 0) {
    planMap_clear();
    planFileLoaded = 0;
    planFilePathSet = 0;
  }
  free(dftPlanFilePath);   // free(NULL) is a no-op
  dftPlanFilePath = NULL;
  if (path != NULL) {
    dftPlanFilePath = malloc(strlen(path)+10);
    // Bug fix: the malloc result was previously strcpy'd without a check;
    // on OOM we now leave the path unset instead of dereferencing NULL.
    if (dftPlanFilePath != NULL) strcpy(dftPlanFilePath, path);
  }
  free(archID);
  archID = NULL;
  if (arch == NULL) arch = Sleef_getCpuIdString();
  archID = malloc(strlen(arch)+10);
  if (archID != NULL) strcpy(archID, arch);
  planMode = mode;
  planFilePathSet = 1;
}
// Populate the global planMap, either from the configured plan file or as
// a fresh empty map.  Callers are expected to hold planMapLock.
static void loadPlanFromFile() {
// If no path was set explicitly, fall back to the environment variable.
if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) {
char *s = getenv(ENVVAR);
if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode);
}
if (planMap != NULL) ArrayMap_dispose(planMap);
if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) {
planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0);
}
// Loading can fail (missing or mismatched file); start empty in that case.
if (planMap == NULL) planMap = initArrayMap();
planFileLoaded = 1;
}
// Persist the in-memory plan map, unless the plan system is read-only or
// no output path has been configured.  Requires a prior loadPlanFromFile().
static void savePlanToFile() {
  assert(planFileLoaded);
  const int readOnly = (planMode & SLEEF_PLAN_READONLY) != 0;
  if (readOnly || dftPlanFilePath == NULL) return;
  ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID);
}
#define CATBIT 8
#define BASETYPEIDBIT 2
#define LOG2LENBIT 8
#define DIRBIT 1
#define BUTSTATBIT 16
// Pack (butStat, log2len, dir, baseTypeID, cat=0) into one 64-bit plan-map
// key.  dir is normalized to 1 for forward, 0 for backward transforms.
static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 0;
  uint64_t k = 0;
  k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // Bug fix: the dir field is DIRBIT (1 bit) wide, but was masked with
  // LOG2LENBIT.  Harmless today because dir is 0/1, but the mask must
  // match the field width to keep the key layout consistent.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
#define LEVELBIT LOG2LENBIT
#define BUTCONFIGBIT 8
#define TRANSCONFIGBIT 8
// Build the plan-map key for a 2D-transpose measurement.  The two side
// lengths are stored order-independently (max first, then min), and
// cat = 2 tags the entry as a transpose record.
static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) {
  const int cat = 2;
  const int larger = MAX(hlen, vlen);
  const int smaller = MIN(hlen, vlen);
  uint64_t key = 0;
  key = (key << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT));
  key = (key << LOG2LENBIT) | (larger & ~(~(uint64_t)0 << LOG2LENBIT));
  key = (key << LOG2LENBIT) | (smaller & ~(~(uint64_t)0 << LOG2LENBIT));
  key = (key << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  key = (key << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return key;
}
// Pack (config, level, log2len, dir, baseTypeID, cat=3) into the plan-map
// key under which the best butterfly width for one level is stored.
static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 3;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // Bug fix: mask the 1-bit dir field with DIRBIT, not LOG2LENBIT
  // (latent only, since dir is already normalized to 0/1 above).
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
// Same layout as keyPath but with cat = 4: the key under which the best
// transform config for one level is stored.
static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 4;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // Bug fix: mask the 1-bit dir field with DIRBIT, not LOG2LENBIT
  // (latent only, since dir is already normalized to 0/1 above).
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
// Look up key in the global planMap.  Entries are stored as hex strings;
// returns 0 when the key is absent or the stored string does not parse.
static uint64_t planMap_getU64(uint64_t key) {
  char *hex = ArrayMap_get(planMap, key);
  uint64_t value = 0;
  if (hex == NULL || sscanf(hex, "%" SCNx64, &value) != 1) return 0;
  return value;
}
// Store value under key in the global planMap as a hex string.
// ArrayMap_put returns the previously stored string (if any), whose
// ownership passes back to us, so it must be freed.
static void planMap_putU64(uint64_t key, uint64_t value) {
  char *s = malloc(100);
  // Bug fix: the malloc result was used unchecked; on OOM we now simply
  // drop the entry instead of writing through NULL.
  if (s == NULL) return;
  snprintf(s, 100, "%" PRIx64, value);  // bounded, always NUL-terminated
  free(ArrayMap_put(planMap, key, s));  // free(NULL) is a no-op
}
// Load a previously measured butterfly path for plan p from the plan map.
// pathCat selects the measurement category.  Returns 1 when a complete,
// valid path was found, 0 otherwise.  Thread-safe via planMapLock.
int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) {
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
initPlanMapLock();
#ifdef _OPENMP
omp_set_lock(&planMapLock);
#endif
if (!planFileLoaded) loadPlanFromFile();
// keyButStat(..., pathCat+10) is a "results present" marker written by
// PlanManager_saveMeasurementResultsP.
int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10));
if (stat == 0) {
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return 0;
}
int ret = 1;
for(int j = p->log2len;j >= 0;j--) {
p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat));
p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat));
// Reject corrupted entries: no stored width may exceed MAXBUTWIDTH.
if (p->bestPath[j] > MAXBUTWIDTH) ret = 0;
}
// Recount the active levels into p->pathLen.
p->pathLen = 0;
for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return ret;
}
// Store the measured best path of plan p into the plan map under category
// pathCat, then persist to file when allowed.  Existing results for the
// same key are kept (first writer wins).  Thread-safe via planMapLock.
void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) {
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
initPlanMapLock();
#ifdef _OPENMP
omp_set_lock(&planMapLock);
#endif
if (!planFileLoaded) loadPlanFromFile();
// If a result marker already exists, keep the stored plan unchanged.
if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) {
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return;
}
for(int j = p->log2len;j >= 0;j--) {
planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]);
planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]);
}
// Write the "results present" marker last, after all per-level entries.
planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1);
if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
}
// Load the stored 2D-transpose execution times (single- and multi-
// threaded) for plan p.  Returns nonzero when a single-threaded
// measurement was found.  Thread-safe via planMapLock.
int PlanManager_loadMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
  // Cleanup: removed the unused local `int ret = 0;`.
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0));
  p->tmMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1));
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return p->tmNoMT != 0;
}
// Store the measured 2D-transpose execution times (single- and multi-
// threaded) for plan p into the plan map, persisting to file when allowed.
// Thread-safe via planMapLock.
void PlanManager_saveMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
  // Cleanup: removed the unused local `int ret = 0;`.
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT);
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT);
  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
|
simd.c |
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/time.h>
//#include "data.h"
//#include "./no_backup/asia_data.h"
//#include "./no_backup/wilt_data.h"
#include "./no_backup/tretail_data.h"
//extern int batch_sz;
//extern int N_for_threads;
//extern int N_layers;
//extern int *actual_layer_len;
//extern int tot_layer_len;
// Evaluate a layered DAG of elementwise binary ops over a batch:
// for each node idx, res[out] = op[idx] ? in_0 * in_1 : in_0 + in_1.
// Layers are processed sequentially; within a layer the per-thread
// subgraphs run in parallel (OpenMP) and the batch dimension is
// vectorized (omp simd).
// NOTE(review): ptr_out is not a parameter -- it is presumably a global
// array from the included data header (tretail_data.h); confirm.
// Returns 0 on completion.
int par_for(int batch_sz, int N_for_threads, int N_layers, int *layer_len, int tot_layer_len,
            int *cum_layer_len,
            float *res, bool *op, int *ptr_0, int *ptr_1) {
  for (int l = 0; l < N_layers; l++) {
#pragma omp parallel
    {
#pragma omp for
      for (int t = 0; t < N_for_threads; t++) {
        for (int layer_l = 0; layer_l < layer_len[t*N_layers + l]; layer_l++) {
          // Node index within this thread's flattened subgraph.
          int cum_layer_l = cum_layer_len[t*(N_layers + 1) + l] + layer_l;
          int idx = cum_layer_l + t * tot_layer_len;
#pragma omp simd simdlen(32)
          for (int b = 0; b < batch_sz; b++) {
            float in_0 = res[ptr_0[idx] * batch_sz + b];
            float in_1 = res[ptr_1[idx] * batch_sz + b];
            res[ptr_out[idx] * batch_sz + b] = op[idx] ? in_0 * in_1 : in_0 + in_1;
          }
        }
      }
    }
  }
  // Bug fix: the function is declared int but previously fell off the end
  // without a return statement (UB if the caller reads the result).
  return 0;
}
// Benchmark driver: one warm-up call to par_for, then n_iter timed
// iterations, followed by a spot check of the last node's outputs.
// NOTE(review): batch_sz, N_for_threads, N_layers, layer_len,
// tot_layer_len, cum_layer_len, res, op, ptr_0, ptr_1, n_tot and
// golden_val are presumably globals defined in the included data header
// (tretail_data.h) -- confirm.
int main() {
//float * res;
//bool* op;
//int * ptr_0;
//int * ptr_1;
// Warm-up run (first-touch of memory, OpenMP thread-pool creation).
par_for(batch_sz, N_for_threads, N_layers, layer_len, tot_layer_len,
cum_layer_len,
res,op,ptr_0,ptr_1);
int n_iter= 1e3;
struct timeval start, end;
gettimeofday(&start, NULL);
for(int i=0; i< n_iter; i++) {
par_for(batch_sz, N_for_threads, N_layers, layer_len, tot_layer_len,
cum_layer_len,
res,op,ptr_0,ptr_1);
}
gettimeofday(&end, NULL);
// Elapsed wall-clock time in seconds.
float delta = ((end.tv_sec - start.tv_sec) * 1000000u +
end.tv_usec - start.tv_usec) / 1.e6;
//for(int b=0; b< batch_sz; b++) {
// Print the last node's result for the first 8 batch elements against
// the expected value from the data header.
for(int b=0; b< 8; b++) {
printf("results %f, actual: %f\n", res[(n_tot-1)*batch_sz + b], golden_val);
}
//printf("results %f, actual: %f\n", res[0], golden_val);
printf("%f s, batch_sz= %d, n_iter= %d\n", delta, batch_sz, n_iter);
printf("%f s per batch\n", delta/n_iter);
}
|
GB_unaryop__minv_int16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_int8
// op(A') function: GB_tran__minv_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise:
// Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16), for p = 0..anz-1
// (via GB_CAST_OP, defined above).  Auto-generated file: do not edit.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_int16_int8
(
int16_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Each entry is independent and the work is uniform, so a static
// schedule over nthreads threads is used.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8 -> int16, and apply the
// minv operator.  The loop body lives in GB_unaryop_transpose.c and is
// specialized here via the GB_* macros defined at the top of this file.
// Auto-generated file: do not edit.
GrB_Info GB_tran__minv_int16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint32
// op(A') function: GB_tran__lnot_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise:
// Cx [p] = !(((uint64_t) Ax [p]) != 0), for p = 0..anz-1
// (via GB_CAST_OP, defined above).  Auto-generated file: do not edit.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__lnot_uint64_uint32
(
uint64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Each entry is independent and the work is uniform, so a static
// schedule over nthreads threads is used.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32 -> uint64, and apply
// the logical-not operator.  The loop body lives in
// GB_unaryop_transpose.c, specialized via the GB_* macros defined at the
// top of this file.  Auto-generated file: do not edit.
GrB_Info GB_tran__lnot_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interpolate_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/pten/core/hostdevice.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
// Read one int32 from each 1-element shape tensor (copying GPU tensors to
// CPU first) and return the collected dimensions.  Enforces that every
// tensor has shape [1].
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1],"
                          // Bug fix: the placeholder was written as "d%."
                          // so the actual dims were never formatted.
                          "but received %d.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
std::vector<T> vec_new_data;
auto* new_data = new_data_tensor->data<T>();
framework::Tensor cpu_starts_tensor;
if (platform::is_gpu_place(new_data_tensor->place())) {
paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(),
&cpu_starts_tensor);
new_data = cpu_starts_tensor.data<T>();
}
vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
return vec_new_data;
}
// Decompose a 3-D/4-D/5-D shape into N, C, D, H, W under either NCHW or
// NHWC layout.  Spatial dimensions absent from the rank are reported as 1.
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  const bool nchw = (data_layout == DataLayout::kNCHW);
  *N = dims[0];
  switch (dims.size()) {
    case 3:  // (N, C, W) or (N, W, C)
      *C = nchw ? dims[1] : dims[2];
      *D = 1;
      *H = 1;
      *W = nchw ? dims[2] : dims[1];
      break;
    case 4:  // (N, C, H, W) or (N, H, W, C)
      *C = nchw ? dims[1] : dims[3];
      *D = 1;
      *H = nchw ? dims[2] : dims[1];
      *W = nchw ? dims[3] : dims[2];
      break;
    default:  // 5-D: (N, C, D, H, W) or (N, D, H, W, C)
      *C = nchw ? dims[1] : dims[4];
      *D = nchw ? dims[2] : dims[1];
      *H = nchw ? dims[3] : dims[2];
      *W = nchw ? dims[4] : dims[3];
      break;
  }
}
// Nearest-neighbor resize: each output pixel (oh, ow) copies the input
// pixel at (ratio_h * oh, ratio_w * ow), rounded to nearest when
// align_corners is set and truncated otherwise.
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto src = EigenTensor<T, 4>::From(input);
  auto dst = EigenTensor<T, 4>::From(*output);
  for (int oh = 0; oh < out_h; oh++) {
    const int ih = align_corners ? static_cast<int>(ratio_h * oh + 0.5)
                                 : static_cast<int>(ratio_h * oh);
    for (int ow = 0; ow < out_w; ow++) {
      const int iw = align_corners ? static_cast<int>(ratio_w * ow + 0.5)
                                   : static_cast<int>(ratio_w * ow);
      for (int b = 0; b < n; b++) {        // batches
        for (int ch = 0; ch < c; ch++) {   // channels
          if (data_layout == DataLayout::kNCHW) {
            dst(b, ch, oh, ow) = src(b, ch, ih, iw);
          } else {
            dst(b, oh, ow, ch) = src(b, ih, iw, ch);
          }
        }
      }
    }
  }
}
// 1-D linear resize along W for (N, C, W) (NCHW) or (N, W, C) (NHWC)
// tensors.  Per output column the two source columns (vx_w/vx_e) and their
// blend weights (vd_w/vd_e) are precomputed, then every output element is
// a weighted sum of its two neighbors.
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
                                const float ratio_w, const int in_w,
                                const int n, const int c, const int out_w,
                                const bool align_corners, const bool align_mode,
                                const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 3>::From(input);
  auto output_t = EigenTensor<T, 3>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // Bug fix: these vectors were only reserve()d but then written through
  // operator[]; indexing past size() is undefined behavior.  resize()
  // both allocates and sets the size.
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;                       // clamp left index
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // right neighbor
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
  for (int i = 0; i < n; i++) {    // batches
    for (int j = 0; j < c; j++) {  // channels
      for (int l = 0; l < out_w; l++) {
        // Blend the two neighboring input columns.
        T out_t;
        if (data_layout == DataLayout::kNCHW) {
          out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
                  input_t(i, j, vx_e[l]) * vd_w[l];
          output_t(i, j, l) = out_t;
        } else {
          out_t = input_t(i, vx_w[l], j) * vd_e[l] +
                  input_t(i, vx_e[l], j) * vd_w[l];
          output_t(i, l, j) = out_t;
        }
      }
    }
  }
}
// Backward pass of 1-D linear resize: each output-gradient element is
// scattered to the two input columns it interpolated from, weighted by
// the same lambdas used in the forward pass.  Accumulates (+=) into
// input_grad, which the caller is expected to have zero-initialized.
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
Tensor* input_grad, const float ratio_w,
const int in_w, const int n, const int c,
const int out_w, const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 3>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 3>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int l = 0; l < out_w; l++) {
// Same source-index/weight computation as the forward pass.
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;  // w
int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // w_id
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
float d_e = 1.f - d_w;  // w2lambda
for (int i = 0; i < n; i++) {  // loop for batches
for (int j = 0; j < c; j++) {  // loop for channels
// Scatter the gradient to the two source columns.
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(i, j, l);
input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e);
input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w);
} else {
const T grad = output_grad_t(i, l, j);
input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e);
input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w);
}
}
}
}
}
// 2-D bilinear resize.  Per output row/column the two source indices and
// interpolation weights are precomputed (vy_*/vd_* for H, vx_*/vd_* for
// W); every output pixel is then a weighted sum of its 4 input neighbors.
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
                                  const float ratio_h, const float ratio_w,
                                  const int in_h, const int in_w, const int n,
                                  const int c, const int out_h, const int out_w,
                                  const bool align_corners,
                                  const bool align_mode,
                                  const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  // Bug fix: reserve() only allocates capacity; the loops below assign
  // via operator[], which is undefined behavior past size().  resize()
  // both allocates and sets the size.
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);  // resize, not reserve -- see above
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
  for (int i = 0; i < n; i++) {    // batches
    for (int j = 0; j < c; j++) {  // channels
      for (int k = 0; k < out_h; k++) {
        for (int l = 0; l < out_w; l++) {
          // Weighted sum of the 4 surrounding input pixels.
          T out_t;
          if (data_layout == DataLayout::kNCHW) {
            out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
                    input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
                    input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
                    input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
            output_t(i, j, k, l) = out_t;
          } else {
            out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
                    input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
                    input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
                    input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
            output_t(i, k, l, j) = out_t;
          }
        }
      }
    }
  }
}
// 3-D trilinear resize.  Per output depth/row/column the two source
// indices and interpolation weights are precomputed (vt_*/vd_f/vd_b for
// D, vy_*/vd_n/vd_s for H, vx_*/vd_w/vd_e for W); every output voxel is
// then a weighted sum of its 8 input neighbors.
template <typename T>
static void TrilinearInterpolation(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const bool align_mode,
    const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  std::vector<int> vt_f, vt_b;
  std::vector<float> vd_f, vd_b;
  // Bug fix: reserve() only allocates capacity; the loops below assign
  // via operator[], which is undefined behavior past size().  resize()
  // both allocates and sets the size.
  vt_f.resize(out_d);
  vt_b.resize(out_d);
  vd_f.resize(out_d);
  vd_b.resize(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int j = 0; j < out_d; j++) {
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    vt_f[j] = t_f;
    vt_b[j] = t_b;
    vd_f[j] = d_f;
    vd_b[j] = d_b;
  }
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  vy_n.resize(out_h);  // resize, not reserve -- see above
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);  // resize, not reserve -- see above
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = (align_mode == 0 && !align_corners)
                  ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                  : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
  for (int b = 0; b < n; b++) {    // loop for batches
    for (int i = 0; i < c; i++) {  // loop for channels
      for (int j = 0; j < out_d; j++) {  // loop for D, H, W
        for (int k = 0; k < out_h; k++) {
          for (int l = 0; l < out_w; l++) {
            // Weighted sum of the 8 surrounding input voxels.
            if (data_layout == DataLayout::kNCHW) {
              T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, i, j, k, l) = out_t;
            } else {
              T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, j, k, l, i) = out_t;
            }
          }
        }
      }
    }
  }
}
// Keys cubic convolution kernel, inner interval (|x| <= 1):
// (A+2)|x|^3 - (A+3)|x|^2 + 1, evaluated in the same Horner order as the
// original so floating-point results are bit-identical.
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
  const T poly = (A + 2) * x - (A + 3);
  return poly * x * x + 1;
}
// Keys cubic convolution kernel, outer interval (1 < |x| < 2):
// A|x|^3 - 5A|x|^2 + 8A|x| - 4A, evaluated in the same Horner order as
// the original so floating-point results are bit-identical.
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
  T acc = A * x - 5 * A;
  acc = acc * x + 8 * A;
  return acc * x - 4 * A;
}
// Weights for the four taps around fractional sample position t
// (0 <= t < 1), using the Keys kernel with A = -0.75 (the conventional
// bicubic choice).  coeffs[0..3] correspond to taps at offsets
// -1, 0, +1, +2 from the floor position.
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  const T A = -0.75;
  const T d_left = t;         // distance to the tap at offset 0
  const T d_right = 1.0 - t;  // distance to the tap at offset +1
  coeffs[0] = cubic_convolution2<T>(d_left + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(d_left, A);
  coeffs[2] = cubic_convolution1<T>(d_right, A);
  coeffs[3] = cubic_convolution2<T>(d_right + 1.0, A);
}
// Cubic interpolation of four consecutive samples at fractional offset t
// (between x1 and x2), using the Keys-kernel weights.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T w[4];
  get_cubic_upsample_coefficients<T>(w, t);
  return x0 * w[0] + x1 * w[1] + x2 * w[2] + x3 * w[3];
}
// 2-D bicubic resize: for each output pixel, sample a clamped 4x4 input
// neighborhood, cubic-interpolate the four rows in x, then interpolate
// the four row results in y.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w,
const bool align_corners,
const DataLayout data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) {  // loop for images
// Fractional source y; y_t is the offset within the cell.
T y_n = align_corners ? static_cast<T>(ratio_h * k)
: static_cast<T>(ratio_h * (k + 0.5) - 0.5);
int input_y = floorf(y_n);
const T y_t = y_n - input_y;
for (int l = 0; l < out_w; l++) {
// Fractional source x; x_t is the offset within the cell.
T x_n = align_corners ? static_cast<T>(ratio_w * l)
: static_cast<T>(ratio_w * (l + 0.5) - 0.5);
int input_x = floorf(x_n);
const T x_t = x_n - input_x;
for (int i = 0; i < n; i++) {  // loop for batches
for (int j = 0; j < c; j++) {  // loop for channels
T coefficients[4];
// interp 4 times in x direction
for (int ii = 0; ii < 4; ii++) {
// Clamp all sampled indices to the valid input range, so border
// pixels are effectively replicated.
int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
static_cast<int>(0));
int access_x_0 =
std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
int access_x_1 =
std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
int access_x_2 =
std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
int access_x_3 =
std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
if (data_layout == DataLayout::kNCHW) {
coefficients[ii] =
cubic_interp<T>(input_t(i, j, access_y, access_x_0),
input_t(i, j, access_y, access_x_1),
input_t(i, j, access_y, access_x_2),
input_t(i, j, access_y, access_x_3), x_t);
} else {
coefficients[ii] =
cubic_interp<T>(input_t(i, access_y, access_x_0, j),
input_t(i, access_y, access_x_1, j),
input_t(i, access_y, access_x_2, j),
input_t(i, access_y, access_x_3, j), x_t);
}
}
// interp y direction
if (data_layout == DataLayout::kNCHW) {
output_t(i, j, k, l) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
} else {
output_t(i, k, l, j) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
}
}
}
}
}
}
// Backward pass of nearest-neighbor interpolation: each output gradient is
// scattered (accumulated) onto the single source pixel it was copied from.
// Caller is expected to have zero-initialized *input_grad.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto in_grad = EigenTensor<T, 4>::From(*input_grad);
  auto out_grad = EigenTensor<T, 4>::From(output_grad);
  for (int oy = 0; oy < out_h; oy++) {  // output rows
    int sy;
    if (align_corners) {
      sy = static_cast<int>(ratio_h * oy + 0.5);  // round to nearest
    } else {
      sy = static_cast<int>(ratio_h * oy);  // truncate
    }
    for (int ox = 0; ox < out_w; ox++) {  // output cols
      int sx;
      if (align_corners) {
        sx = static_cast<int>(ratio_w * ox + 0.5);
      } else {
        sx = static_cast<int>(ratio_w * ox);
      }
      for (int b = 0; b < n; b++) {      // batches
        for (int ch = 0; ch < c; ch++) { // channels
          if (data_layout == DataLayout::kNCHW) {
            in_grad(b, ch, sy, sx) += out_grad(b, ch, oy, ox);
          } else {
            in_grad(b, sy, sx, ch) += out_grad(b, oy, ox, ch);
          }
        }
      }
    }
  }
}
// Backward pass of bilinear interpolation: distribute each output gradient
// onto its four surrounding input pixels, weighted by the same bilinear
// coefficients used in the forward pass. Supports NCHW and NHWC.
// align_flag selects the "half-pixel" coordinate mapping (align_mode == 0
// and align_corners == false).
template <typename T>
static void BilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int in_h, const int in_w, const int n,
    const int c, const int out_h, const int out_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // y_n / y_s: north (upper) and south (lower) source rows, clamped;
    // d_n / d_s: their interpolation weights (d_n + d_s == 1).
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    for (int l = 0; l < out_w; l++) {
      // x_w / x_e: west and east source columns; d_w / d_e: their weights.
      int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                           : static_cast<int>(ratio_w * l);
      x_w = (x_w > 0) ? x_w : 0;
      int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
      float idx_src_x = ratio_w * (l + 0.5) - 0.5;
      idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
      float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
      float d_e = 1.f - d_w;
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bilinear interpolation grad
          if (data_layout == DataLayout::kNCHW) {
            const T grad = output_grad_t(i, j, k, l);
            input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
          } else {
            const T grad = output_grad_t(i, k, l, j);
            input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
          }
        }
      }
    }
  }
}
// Backward pass of trilinear interpolation over a 5-D tensor: distribute
// each output gradient onto its eight surrounding input voxels (front/back
// x north/south x west/east), weighted by the forward-pass coefficients.
// Supports NCDHW ("kNCHW") and NDHWC layouts; *input_grad must be
// zero-initialized by the caller.
template <typename T>
static void TrilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const int align_mode,
    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int j = 0; j < out_d; j++) {  // loop for D
    // t_f / t_b: front and back source depth slices; d_f / d_b: weights.
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    for (int k = 0; k < out_h; k++) {  // loop for H
      // y_n / y_s: north and south source rows; d_n / d_s: weights.
      int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                           : static_cast<int>(ratio_h * k);
      y_n = (y_n > 0) ? y_n : 0;
      int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
      float idx_src_y = ratio_h * (k + 0.5) - 0.5;
      idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
      float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
      float d_s = 1.f - d_n;
      for (int l = 0; l < out_w; l++) {  // loop for W
        // x_w / x_e: west and east source columns; d_w / d_e: weights.
        int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                             : static_cast<int>(ratio_w * l);
        x_w = (x_w > 0) ? x_w : 0;
        int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
        float idx_src_x = ratio_w * (l + 0.5) - 0.5;
        idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
        float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
        float d_e = 1.f - d_w;
        for (int b = 0; b < n; b++) {    // loop for batches
          for (int i = 0; i < c; i++) {  // loop for channels
            // trilinear interpolation grad
            if (data_layout == DataLayout::kNCHW) {
              const T grad = output_grad_t(b, i, j, k, l);
              input_grad_t(b, i, t_f, y_n, x_w) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, i, t_f, y_n, x_e) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, i, t_f, y_s, x_w) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, i, t_f, y_s, x_e) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, i, t_b, y_n, x_w) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, i, t_b, y_n, x_e) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, i, t_b, y_s, x_w) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, i, t_b, y_s, x_e) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            } else {
              const T grad = output_grad_t(b, j, k, l, i);
              input_grad_t(b, t_f, y_n, x_w, i) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, t_f, y_n, x_e, i) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, t_f, y_s, x_w, i) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, t_f, y_s, x_e, i) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, t_b, y_n, x_w, i) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, t_b, y_n, x_e, i) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, t_b, y_s, x_w, i) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, t_b, y_s, x_e, i) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            }
          }
        }
      }
    }
  }
}
// Backward pass of bicubic interpolation: each output gradient is spread
// over the 4x4 (edge-clamped) input window using the separable product of
// x and y cubic weights. *input_grad must be zero-initialized by caller.
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // Real-valued source coordinate (same mapping as the forward pass).
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    // NOTE(review): floorf computes in float even when T is double; see
    // the forward BicubicInterpolation for the same caveat.
    int input_y = floorf(y_n);
    T y_t = y_n - input_y;  // fractional offset in [0, 1)
    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      T x_t = x_n - input_x;
      T x_coeffs[4];
      T y_coeffs[4];
      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              // Clamp window indices to the border (edge replication).
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}
// CPU forward entry point for 1-D interpolation ("linear").
// Resolves the output width from, in priority order: SizeTensor inputs,
// then Scale tensor / "scale" attribute, then OutSize tensor overriding the
// "out_w" attribute. Allocates the output, short-circuits to a copy when
// sizes match, computes the width ratio, and dispatches on interp_method.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  } else {
    // Scale tensor input takes precedence over the scale attribute.
    float scale;
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale = scale_data[0];
    } else {
      scale = ctx.Attr<float>("scale");
    }
    if (scale > 0) {
      out_w = static_cast<int>(in_w * scale);
    }
    // OutSize tensor, when present, overrides the scaled size.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_w = out_size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_w = 0.f;
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("linear" == interp_method) {
    LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
                           align_corners, align_mode, data_layout);
  }
}
// CPU forward entry point for 2-D interpolation ("bilinear", "nearest",
// "bicubic"). Output H/W resolution priority: SizeTensor, then Scale /
// "scale" attribute, then OutSize tensor over the out_h/out_w attributes.
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    // Scale tensor input takes precedence over the scale attribute.
    float scale;
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale = scale_data[0];
    } else {
      scale = ctx.Attr<float>("scale");
    }
    if (scale > 0) {
      out_h = static_cast<int>(in_h * scale);
      out_w = static_cast<int>(in_w * scale);
    }
    // OutSize tensor, when present, overrides the scaled sizes.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_h = out_size_data[0];
      out_w = out_size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                             out_h, out_w, align_corners, align_mode,
                             data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
                                  out_w, align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                            out_h, out_w, align_corners, data_layout);
  }
}
// CPU forward entry point for 3-D interpolation ("trilinear").
// Output D/H/W resolution priority: SizeTensor, then Scale / "scale"
// attribute, then OutSize tensor over the out_d/out_h/out_w attributes.
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    // Scale tensor input takes precedence over the scale attribute.
    float scale;
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale = scale_data[0];
    } else {
      scale = ctx.Attr<float>("scale");
    }
    if (scale > 0) {
      out_d = static_cast<int>(in_d * scale);
      out_h = static_cast<int>(in_h * scale);
      out_w = static_cast<int>(in_w * scale);
    }
    // OutSize tensor, when present, overrides the scaled sizes.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_d = out_size_data[0];
      out_h = out_size_data[1];
      out_w = out_size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
                                  "out_d in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());
  // Identity resize: plain copy, no interpolation needed.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(in_d) / out_d;
  }
  if (out_h > 1) {
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
                              in_h, in_w, n, c, out_d, out_h, out_w,
                              align_corners, align_mode, data_layout);
  }
}
// CPU backward entry point for 1-D interpolation. Recomputes the forward
// output width (attribute, then Scale, then OutSize, then SizeTensor last —
// note the resolution order differs from the forward pass, where SizeTensor
// short-circuits the rest; the final value is the same), zero-fills the
// input gradient, and dispatches the gradient kernel.
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");
  float scale;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale = scale_data[0];
  } else {
    scale = ctx.Attr<float>("scale");
  }
  if (scale > 0) {
    out_w = static_cast<int>(in_w * scale);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_w = out_size_data[0];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradient kernels accumulate with +=, so zero-fill first.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_w = 0.f;
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("linear" == interp_method) {
    LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
                               out_w, align_corners, align_mode, data_layout);
  }
}
// CPU backward entry point for 2-D interpolation ("bilinear", "nearest",
// "bicubic"). Recomputes the forward output H/W (attributes, then Scale,
// then OutSize, then SizeTensor last), zero-fills the input gradient, and
// dispatches the matching gradient kernel.
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale = scale_data[0];
  } else {
    scale = ctx.Attr<float>("scale");
  }
  if (scale > 0) {
    out_h = static_cast<int>(in_h * scale);
    out_w = static_cast<int>(in_w * scale);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_h = out_size_data[0];
    out_w = out_size_data[1];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_h, in_w};
  } else {
    dim_grad = {n, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradient kernels accumulate with +=, so zero-fill first.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w, align_corners,
                                 align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                      n, c, out_h, out_w, align_corners,
                                      data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h,
                                in_w, n, c, out_h, out_w, align_corners,
                                data_layout);
  }
}
// CPU backward entry point for 3-D interpolation ("trilinear").
// Recomputes the forward output D/H/W (attributes, then Scale, then
// OutSize, then SizeTensor last), zero-fills the input gradient, and
// dispatches the gradient kernel.
// Fix: output_grad is now taken by const reference. It was previously
// passed BY VALUE — inconsistent with Interpolate1DCPUBwd/Interpolate2DCPUBwd
// and forcing a needless Tensor copy on every call. Call sites
// (`Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad)`) are unaffected.
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad,
                                const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale = scale_data[0];
  } else {
    scale = ctx.Attr<float>("scale");
  }
  if (scale > 0) {
    out_d = static_cast<int>(in_d * scale);
    out_h = static_cast<int>(in_h * scale);
    out_w = static_cast<int>(in_w * scale);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_d = out_size_data[0];
    out_h = out_size_data[1];
    out_w = out_size_data[2];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  }
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_d, in_h, in_w};
  } else {
    dim_grad = {n, in_d, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  // Gradient kernels accumulate with +=, so zero-fill first.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));
  // Identity resize: gradient passes through unchanged.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(in_d) / out_d;
  }
  if (out_h > 1) {
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolationGrad<T>(
        output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n,
        c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
  }
}
// Forward kernel: dispatches by input rank — 3-D tensors are 1-D spatial
// data, 4-D are 2-D images, 5-D are 3-D volumes.
template <typename T>
class InterpolateKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    const int rank = input->dims().size();
    switch (rank) {
      case 3:  // 1D interpolation
        Interpolate1DCPUFwd<T>(ctx, *input, output);
        break;
      case 4:  // 2D interpolation
        Interpolate2DCPUFwd<T>(ctx, *input, output);
        break;
      case 5:  // 3D interpolation
        Interpolate3DCPUFwd<T>(ctx, *input, output);
        break;
      default:
        break;  // unsupported rank: no-op, same as the original chain
    }
  }
};
// Backward kernel: dispatches by the rank of the incoming output gradient
// (3-D -> 1-D grad, 4-D -> 2-D grad, 5-D -> 3-D grad).
template <typename T>
class InterpolateGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    const int rank = output_grad->dims().size();
    switch (rank) {
      case 3:  // 1D interpolation grad
        Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 4:  // 2D interpolation grad
        Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 5:  // 3D interpolation grad
        Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      default:
        break;  // unsupported rank: no-op, same as the original chain
    }
  }
};
} // namespace operators
} // namespace paddle
|
Example_task_reduction.2.c | /*
* @@name: task_reduction.2.c
* @@type: C
* @@compilable: yes, omp_5.0
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
// Demonstrates OpenMP 5.0 task reductions: the reduction(task,+:x) modifier
// registers x for task reduction, and explicit tasks join it via
// in_reduction(+:x).
int main(void){
   int N=100, M=10;
   int i, x;
   // USE CASE 1 explicit-task reduction + parallel reduction clause
   // Each of the M implicit tasks adds 1; each of the N explicit tasks
   // adds 1 and folds its contribution into the same reduction -> M + N.
   x=0;
   #pragma omp parallel num_threads(M) reduction(task,+:x)
   {
     x++; // implicit task reduction statement
     #pragma omp single
     for(i=0;i<N;i++)
       #pragma omp task in_reduction(+:x)
         x++;
   }
   printf("x=%d =M+N\n",x); // x= 110 =M+N
   // USE CASE 2 task reduction + worksharing reduction clause
   // The loop body adds N; every even iteration spawns a task that
   // subtracts 1 inside the same reduction -> N - N/2.
   x=0;
   #pragma omp parallel for num_threads(M) reduction(task,+:x)
   for(i=0; i< N; i++){
     x++;
     if( i%2 == 0){
       #pragma omp task in_reduction(+:x)
          x--;
     }
   }
   printf("x=%d =N-N/2\n",x); // x= 50 =N-N/2
   return 0;
}
|
SirFAST.c | /*
* Copyright (c) <2008 - 2020>, University of Washington, Simon Fraser University, Bilkent University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* - Neither the names of the University of Washington, Simon Fraser University,
* nor the names of its contributors may be
* used to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Authors:
Farhad Hormozdiari
Faraz Hach
Can Alkan
Emails:
farhadh AT uw DOT edu
fhach AT cs DOT sfu DOT ca
calkan AT cs DOT bilkent DOT edu DOT tr
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <dirent.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include <mmintrin.h>
#include <omp.h>
#include "Common.h"
#include "Reads.h"
#include "HashTable.h"
#include "Output.h"
#include "SirFAST.h"
#include "RefGenome.h"
#define min(a,b) ((a)>(b)?(b):(a))
#define min3(a,b,c) ((a)>(b)?(b>c?c:b):(a>c?c:a))
#define CHARCODE(a) (a=='A' ? 0 : (a=='C' ? 1 : (a=='G' ? 2 : (a=='T' ? 3 : 4))))
#define MAX_REF_SIZE 18
#define KEY_LENGTH 10
#define KEY_LENGTH0 5
#define INDEL_GAP 3
#define EXPECTED_GAP 2
#define INITIAL_GAP01 -1
#define INITIAL_GAP12 0
#define INITIAL_GAP23 5
#define DEBUG 2
#define TEST_KEY_NUM 3
/* ---- module-wide state for the SirFAST mapper ---- */
char *versionNumberF = "0.4";
/* progress / statistics counters */
long long verificationCnt = 0;
long long mappingCnt = 0;
long long mappedSeqCnt = 0;
long long completedSeqCnt = 0;
char *mappingOutput;
/* current reference-genome window (loaded chunk) */
char *_msf_refGen = NULL;
int _msf_refGenLength = 0;
int _msf_refGenOffset = 0;
char *_msf_refGenName = NULL;
int _msf_refGenBeg;
int _msf_refGenEnd;
IHashTable *_msf_hashTable = NULL;
int *_msf_samplingLocsEnds;
/* read list and its size bookkeeping */
Read *_msf_seqList;
int _msf_seqListSize;
int _msf_totalSeqListSize;
/* hash-sorted view of the reads: built in preProcessReads(),
   _msf_map_sort_seqList maps read number -> sorted position */
Pair *_msf_sort_seqList = NULL;
int *_msf_map_sort_seqList;
/* read index table (keyed lookup structures) */
ReadIndexTable *_msf_rIndex = NULL;
int _msf_rIndexSize;
int _msf_rIndexMax;
int **_msf_verifiedLocs = NULL;
int *_msf_seqHits;
/* temporary-output file bookkeeping */
int _msf_openFiles = 0;
int _msf_maxLSize = 0;
int _msf_maxRSize = 0;
MappingInfo *_msf_mappingInfo;
/* per-read best mapping, allocated in initBestMapping() */
BestFullMappingInfo *bestHitMappingInfo;
int _msf_maxFile = 0;
char _msf_fileName[4000][200][2][FILE_NAME_LENGTH];
int _msf_fileCount[4000];
/* paired-end bookkeeping: concordant / OEA / discordant states per read */
char *_msf_readHasConcordantMapping;
int *_msf_oeaMapping;
int *_msf_discordantMapping;
/************************************************************************************************************/
int compare(const void *a, const void *b) {
return ((Pair *) a)->hv - ((Pair *) b)->hv;
}
/*
 * Parse the integer in str[index1, index2) and return it (as float, to
 * match the original interface; large values lose precision).
 * Fix: the original strncpy'd index2-index1 bytes into a fixed
 * SEQ_MAX_LENGTH buffer with no bounds check — a span longer than the
 * buffer overflowed the stack, and a negative span was undefined
 * behavior. The length is now clamped to [0, sizeof tmp - 1], and the
 * copy uses memcpy plus explicit termination (strncpy does not guarantee
 * a terminating NUL).
 */
float str2int(char *str, int index1, int index2) {
  char tmp[SEQ_MAX_LENGTH];
  int len = index2 - index1;
  if (len < 0)
    len = 0;
  if (len > (int) sizeof(tmp) - 1)
    len = (int) sizeof(tmp) - 1;
  memcpy(tmp, &str[index1], (size_t) len);
  tmp[len] = '\0';
  return atol(tmp);
}
/*
 * Allocate the per-read best-mapping table and reset every entry to the
 * "no mapping yet" state (loc = -1, total probability 0).
 * Must be paired with one of the finalizeBest* functions, which free it.
 */
void initBestMapping(int totalReadNumber)
{
  int r;
  bestHitMappingInfo = getMem(totalReadNumber * sizeof(BestFullMappingInfo), "bestHitMappingInfo @initBestMapping()");
  for (r = 0; r < totalReadNumber; r++) {
    bestHitMappingInfo[r].loc = -1;
    bestHitMappingInfo[r].tprob = 0.0;
  }
}
/*
 * Paired-end finalization: emit the best mapping for each read PAIR
 * (hence _msf_seqListSize / 2 iterations), then release the table.
 * NOTE(review): the table was allocated with totalReadNumber entries in
 * initBestMapping() but is freed here with _msf_seqListSize — presumably
 * callers always pass totalReadNumber == _msf_seqListSize; confirm.
 */
void finalizeBestConcordantDiscordant() {
  int i = 0;
  for (i = 0; i < _msf_seqListSize / 2; i++) {
    outputPairFullMappingInfo(i);
  }
  freeMem(bestHitMappingInfo, _msf_seqListSize * sizeof(BestFullMappingInfo), "bestHitMappingInfo @finalizeBestConcordantDiscordant()");
}
void finalizeBestSingleMapping()
{
int i = 0;
char *_tmpQual, *_tmpSeq;
char rqual[SEQ_LENGTH + 1];
SAM _msf_output;
OPT_FIELDS _msf_optionalFields[2];
rqual[SEQ_LENGTH] = '\0';
for(i = 0; i < _msf_seqListSize; i++) {
if(_msf_seqList[i].hits[0] != 0) {
if (bestHitMappingInfo[i].dir) {
reverse(_msf_seqList[i].qual, rqual, SEQ_LENGTH);
_tmpQual = rqual;
_tmpSeq = _msf_seqList[i].rseq;
}
else {
_tmpQual = _msf_seqList[i].qual;
_tmpSeq = _msf_seqList[i].seq;
}
_msf_output.QNAME = _msf_seqList[i].name;
_msf_output.FLAG = 16 * bestHitMappingInfo[i].dir;
_msf_output.RNAME = bestHitMappingInfo[i].chr;
_msf_output.POS = bestHitMappingInfo[i].loc;
_msf_output.MAPQ = mapQ(i);
_msf_output.CIGAR = bestHitMappingInfo[i].cigar;
_msf_output.MRNAME = "*";
_msf_output.MPOS = 0;
_msf_output.ISIZE = 0;
_msf_output.SEQ = _tmpSeq;
_msf_output.QUAL = _tmpQual;
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = bestHitMappingInfo[i].err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = bestHitMappingInfo[i].md;
output(_msf_output, 0);
}
}
freeMem(bestHitMappingInfo, _msf_seqListSize * sizeof(FullMappingInfo), "bestHitMappingInfo @finalizeBestSingleMapping()");
}
/*
 * Build the hash-sorted view of the read list: pair every read index with
 * the hash of its sequence, sort those pairs, and build the reverse map
 * from read number to its position in the sorted order.
 */
void preProcessReads() {
    int r;
    _msf_sort_seqList = getMem(_msf_seqListSize * sizeof(Pair), "_msf_sort_seqList @preProcessReads()");
    for (r = 0; r < _msf_seqListSize; r++) {
        _msf_sort_seqList[r].hv         = hashVal(_msf_seqList[r].seq);
        _msf_sort_seqList[r].readNumber = r;
    }
    qsort(_msf_sort_seqList, _msf_seqListSize, sizeof(Pair), compare);
    _msf_map_sort_seqList = getMem(_msf_seqListSize * sizeof(int), "_msf_map_sort_seqList @preProcessReads()");
    for (r = 0; r < _msf_seqListSize; r++)
        _msf_map_sort_seqList[_msf_seqList[r].readNumber] = r;
}
/*
 * Release the per-contig search state so the next contig can be processed.
 * NOTE(review): the parameter seqListSize is used only for _msf_mappingInfo,
 * while the other frees use the global _msf_seqListSize — presumably these
 * are equal when called; confirm against the caller.
 */
void resetFAST(unsigned int seqListSize) {
/* size argument of 1 matches the allocator's accounting for this buffer */
freeMem(_msf_samplingLocsEnds, 1, "_msf_samplingLocsEnds @resetFAST()");
_msf_samplingLocsEnds = NULL;
freeMem(_msf_oeaMapping, _msf_seqListSize * sizeof(int), "_msf_oeaMapping @resetFAST()");
freeMem(_msf_discordantMapping, _msf_seqListSize * sizeof(int), "_msf_discordantMapping @resetFAST()");
freeMem(_msf_sort_seqList, _msf_seqListSize * sizeof(Pair), "_msf_sort_seqList @resetFAST()");
freeMem(_msf_map_sort_seqList, _msf_seqListSize * sizeof(int), "_msf_map_sort_seqList @resetFAST()");
if (pairedEndMode) {
freeMem(_msf_mappingInfo, seqListSize * sizeof (MappingInfo), "_msf_mappingInfo @resetFAST()");
freeMem(_msf_seqHits, _msf_seqListSize * sizeof(int), "_msf_seqHits @resetFAST()");
_msf_seqHits = NULL;
/* concordance flags are per read PAIR, hence the /2 */
freeMem(_msf_readHasConcordantMapping, _msf_seqListSize / 2 * sizeof(char), "_msf_readHasConcordantMapping @resetFAST()");
_msf_refGenOffset = 0;
}
}
/*
 * Final teardown of the search state at end of run.
 * Fix: the reported free sizes for the two sort arrays were swapped —
 * _msf_sort_seqList is allocated as Pair[] and _msf_map_sort_seqList as
 * int[] in preProcessReads() (resetFAST() already had them right).
 * Also fixes the "finalizetFAST" typo in the accounting tag.
 */
void finalizeFAST() {
    freeMem(_msf_seqHits, (_msf_seqListSize) * sizeof(int), "_msf_seqHits @finalizeFAST()");
    freeMem(_msf_refGenName, 4 * SEQ_LENGTH, "_msf_refGenName @finalizeFAST()");
    freeMem(_msf_map_sort_seqList, sizeof(int) * _msf_seqListSize, "_msf_map_sort_seqList @finalizeFAST()");
    freeMem(_msf_sort_seqList, sizeof(Pair) * _msf_seqListSize, "_msf_sort_seqList @finalizeFAST()");
}
/*
 * Number of decimal digits needed to print cnt in a CIGAR/MD field.
 * Counts assumed to stay below 1000 (three digits), matching callers.
 */
int addCigarSize(int cnt) {
    return (cnt < 10) ? 1 : (cnt < 100) ? 2 : 3;
}
/*
Generate Cigar from the back tracking matrix
*/
/*
void generateCigar(char *matrix, int matrixLength, char *cigar) {
int i = 0;
int counterM = 0;
int counterI = 0;
int counterD = 0;
int cigarSize = 0;
cigar[0] = '\0';
while (i < matrixLength) {
if (matrix[i] == 'M') {
counterM++;
if (counterI != 0) {
sprintf(cigar, "%s%dI", cigar, counterI);
cigarSize += addCigarSize(counterI) + 1;
cigar[cigarSize] = '\0';
counterI = 0;
} else if (counterD != 0) {
sprintf(cigar, "%s%dD", cigar, counterD);
cigarSize += addCigarSize(counterD) + 1;
cigar[cigarSize] = '\0';
counterD = 0;
}
} else if (matrix[i] == 'I') {
if (counterM != 0) {
sprintf(cigar, "%s%dM", cigar, counterM);
cigarSize += addCigarSize(counterM) + 1;
cigar[cigarSize] = '\0';
counterM = 0;
} else if (counterD != 0) {
sprintf(cigar, "%s%dD", cigar, counterD);
cigarSize += addCigarSize(counterD) + 1;
cigar[cigarSize] = '\0';
counterD = 0;
}
counterI++;
i++;
} else if (matrix[i] == 'D') {
if (counterM != 0) {
sprintf(cigar, "%s%dM", cigar, counterM);
cigarSize += addCigarSize(counterM) + 1;
cigar[cigarSize] = '\0';
counterM = 0;
} else if (counterI != 0) {
sprintf(cigar, "%s%dI", cigar, counterI);
cigarSize += addCigarSize(counterI) + 1;
cigar[cigarSize] = '\0';
counterI = 0;
}
counterD++;
i++;
} else {
counterM++;
if (counterI != 0) {
sprintf(cigar, "%s%dI", cigar, counterI);
cigarSize += addCigarSize(counterI) + 1;
cigar[cigarSize] = '\0';
counterI = 0;
} else if (counterD != 0) {
sprintf(cigar, "%s%dD", cigar, counterD);
cigarSize += addCigarSize(counterD) + 1;
cigar[cigarSize] = '\0';
counterD = 0;
}
}
i++;
}
if (counterM != 0) {
sprintf(cigar, "%s%dM", cigar, counterM);
cigarSize += addCigarSize(counterM) + 1;
cigar[cigarSize] = '\0';
counterM = 0;
} else if (counterI != 0) {
sprintf(cigar, "%s%dI", cigar, counterI);
cigarSize += addCigarSize(counterI) + 1;
cigar[cigarSize] = '\0';
counterI = 0;
} else if (counterD != 0) {
sprintf(cigar, "%s%dD", cigar, counterD);
cigarSize += addCigarSize(counterD) + 1;
cigar[cigarSize] = '\0';
counterD = 0;
}
cigar[cigarSize] = '\0';
}
*/
/*
Creates the Cigar output from the mismatching positions format [0-9]+(([ACTGN]|\^[ACTGN]+)[0-9]+)*
*/
/*
void generateCigarFromMD(char *mismatch, int mismatchLength, char *cigar) {
int i = 0;
int j = 0;
int start = 0;
int cigarSize = 0;
cigar[0] = '\0';
while (i < mismatchLength) {
if (mismatch[i] >= '0' && mismatch[i] <= '9') {
start = i;
while (mismatch[i] >= '0' && mismatch[i] <= '9'
&& i < mismatchLength)
i++;
int value = atoi(mismatch + start);
for (j = 0; j < value - 1; j++) {
cigar[cigarSize] = 'M';
cigarSize++;
}
cigar[cigarSize] = 'M';
} else if (mismatch[i] == '^') {
cigar[cigarSize] = 'I';
i++;
} else if (mismatch[i] == '\'') {
cigar[cigarSize] = 'D';
i++;
} else {
cigar[cigarSize] = 'M';
cigarSize++;
}
cigarSize++;
i++;
}
cigar[cigarSize] = '\0';
}
*/
/*
 * Convert an alignment matrix string (one char per column: 'M' match,
 * 'D' followed by the deleted base, 'I' insertion, any other letter a
 * mismatched base) into a SAM MD-style string in outputSNP, e.g.
 * "MMDAM" -> "2^A1" and "MMAM" -> "2A1".
 *
 * Fix: the original appended with sprintf(outputSNP, "%s...", outputSNP, ...),
 * i.e. source and destination overlap — undefined behavior per the C
 * standard. We now write at outputSNP + snpSize and advance by sprintf's
 * return value, which also removes the dependency on addCigarSize() and its
 * implicit 3-digit cap on match counts.
 */
void generateSNPSAM(char *matrix, int matrixLength, char *outputSNP) {
    int i = 0;
    int counterM = 0;        /* run length of matches not yet flushed */
    int counterD = 0;        /* number of deleted bases buffered in delSeq */
    char delSeq[100];        /* bases of the current deletion run */
    int snpSize = 0;         /* current length of outputSNP */
    outputSNP[0] = '\0';
    delSeq[0] = '\0';
    while (i < matrixLength) {
        if (matrix[i] == 'M') {
            counterM++;
            if (counterD != 0) {
                /* a match ends a deletion run: emit "^<bases>" */
                delSeq[counterD] = '\0';
                counterD = 0;
                snpSize += sprintf(outputSNP + snpSize, "^%s", delSeq);
                delSeq[0] = '\0';
            }
        } else if (matrix[i] == 'D') {
            if (counterM != 0) {
                /* flush pending match count before starting the deletion */
                snpSize += sprintf(outputSNP + snpSize, "%d", counterM);
                counterM = 0;
            }
            /* the deleted base follows the 'D' marker in the matrix */
            delSeq[counterD] = matrix[i + 1];
            counterD++;
            i++; /* consume the base character as well */
        } else if (matrix[i] == 'I') {
            /* insertions are not represented in MD; they only terminate a
               pending deletion run (a pending match run is kept open) */
            if (counterM == 0 && counterD != 0) {
                delSeq[counterD] = '\0';
                snpSize += sprintf(outputSNP + snpSize, "^%s", delSeq);
                counterD = 0;
                delSeq[0] = '\0';
            }
            i++;
        } else {
            /* mismatched base: flush any pending runs, then emit the base */
            if (counterM != 0) {
                snpSize += sprintf(outputSNP + snpSize, "%d", counterM);
                counterM = 0;
            }
            if (counterD != 0) {
                delSeq[counterD] = '\0';
                counterD = 0;
                snpSize += sprintf(outputSNP + snpSize, "^%s", delSeq);
                delSeq[0] = '\0';
            }
            snpSize += sprintf(outputSNP + snpSize, "%c", matrix[i]);
        }
        i++;
    }
    /* flush whatever run is still open at end of matrix */
    if (counterM != 0) {
        snpSize += sprintf(outputSNP + snpSize, "%d", counterM);
    } else if (counterD != 0) {
        delSeq[counterD] = '\0';
        snpSize += sprintf(outputSNP + snpSize, "^%s", delSeq);
    }
    outputSNP[snpSize] = '\0';
}
/*
 * qsort comparator ordering FullMappingInfo records by ascending loc.
 * Fix: plain subtraction (a->loc - b->loc) can overflow int for extreme
 * values; the (gt) - (lt) trichotomy returns the same sign without UB.
 */
int compareOut(const void *a, const void *b) {
    const FullMappingInfo *aInfo = (const FullMappingInfo *) a;
    const FullMappingInfo *bInfo = (const FullMappingInfo *) b;
    return (aInfo->loc > bInfo->loc) - (aInfo->loc < bInfo->loc);
}
/************************************************/
/* direction = 0 forward */
/* 1 backward */
/************************************************/
/*
 * Emit the best recorded mapping for read pair `readNumber` as two SAM
 * records (one per mate), using bestHitMappingInfo[readNumber*2] and
 * [readNumber*2+1]. Does nothing when neither mate has a best location.
 */
void outputPairFullMappingInfo(int readNumber) {
char *seq1, *seq2, *rseq1, *rseq2, *qual1, *qual2;
char rqual1[SEQ_LENGTH + 1], rqual2[SEQ_LENGTH + 1];
SAM _msf_output;
OPT_FIELDS _msf_optionalFields[8];
rqual1[SEQ_LENGTH] = rqual2[SEQ_LENGTH] = '\0';
seq1 = _msf_seqList[readNumber * 2].seq;
rseq1 = _msf_seqList[readNumber * 2].rseq;
qual1 = _msf_seqList[readNumber * 2].qual;
reverse(_msf_seqList[readNumber * 2].qual, rqual1, SEQ_LENGTH);
seq2 = _msf_seqList[readNumber * 2 + 1].seq;
rseq2 = _msf_seqList[readNumber * 2 + 1].rseq;
qual2 = _msf_seqList[readNumber * 2 + 1].qual;
reverse(_msf_seqList[readNumber * 2 + 1].qual, rqual2, SEQ_LENGTH);
/* loc == -1 means "no best mapping recorded" (see initBestMapping) */
if (bestHitMappingInfo[readNumber * 2].loc == -1 && bestHitMappingInfo[readNumber * 2 + 1].loc == -1) {
return;
}
else {
char *seq;
char *qual;
char d1;
char d2;
int isize;
int proper = 0;
// ISIZE CALCULATION
// The distance between outer edges
isize = abs(
bestHitMappingInfo[readNumber * 2].loc
- bestHitMappingInfo[readNumber * 2 + 1].loc)
+ SEQ_LENGTH - 2;
/* negative ISIZE when mate 1 maps downstream of mate 2 */
if (bestHitMappingInfo[readNumber * 2].loc
- bestHitMappingInfo[readNumber * 2 + 1].loc > 0) {
isize *= -1;
}
/* dir == -1 marks the reverse strand; d1/d2 become SAM flag bits */
d1 = (bestHitMappingInfo[readNumber * 2].dir == -1) ? 1 : 0;
d2 = (bestHitMappingInfo[readNumber * 2 + 1].dir == -1) ? 1 : 0;
if (d1) {
seq = rseq1;
qual = rqual1;
} else {
seq = seq1;
qual = qual1;
}
//TODO for CG like SOLID
/* NOTE(review): "proper pair" bit set when both mates are on the SAME
   strand — matches the discordant-mode convention used elsewhere in
   this file; confirm against the mapper's pairing definition */
if ( (d1 && d2) || (!d1 && !d2)) {
proper = 2;
} else {
proper = 0;
}
/* first mate: FLAG 0x1 paired | proper | 0x10 rev | 0x20 mate-rev | 0x40 */
_msf_output.POS = bestHitMappingInfo[readNumber * 2].loc;
_msf_output.MPOS = bestHitMappingInfo[readNumber * 2 + 1].loc;
_msf_output.FLAG = 1 + proper + 16 * d1 + 32 * d2 + 64;
_msf_output.ISIZE = isize;
_msf_output.SEQ = seq;
_msf_output.QUAL = qual;
_msf_output.QNAME = _msf_seqList[readNumber * 2].name;
_msf_output.RNAME = bestHitMappingInfo[readNumber * 2].chr;
_msf_output.MAPQ = mapQ(readNumber * 2) + mapQ(readNumber * 2 + 1);
_msf_output.CIGAR = bestHitMappingInfo[readNumber * 2].cigar;
_msf_output.MRNAME = "=";
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = bestHitMappingInfo[readNumber * 2].err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = bestHitMappingInfo[readNumber * 2].md;
output(_msf_output, 0);
if (d2) {
seq = rseq2;
qual = rqual2;
} else {
seq = seq2;
qual = qual2;
}
/* second mate: 0x80 instead of 0x40, strand bits swapped, ISIZE negated */
_msf_output.POS = bestHitMappingInfo[readNumber * 2 + 1].loc;
_msf_output.MPOS = bestHitMappingInfo[readNumber * 2].loc;
_msf_output.FLAG = 1 + proper + 16 * d2 + 32 * d1 + 128;
_msf_output.ISIZE = -isize;
_msf_output.SEQ = seq;
_msf_output.QUAL = qual;
_msf_output.QNAME = _msf_seqList[readNumber * 2].name;
_msf_output.RNAME = bestHitMappingInfo[readNumber * 2].chr;
_msf_output.MAPQ = mapQ(readNumber * 2) + mapQ(readNumber * 2 + 1);
_msf_output.CIGAR = bestHitMappingInfo[readNumber * 2 + 1].cigar;
_msf_output.MRNAME = "=";
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = bestHitMappingInfo[readNumber * 2 + 1].err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = bestHitMappingInfo[readNumber * 2 + 1].md;
output(_msf_output, 0);
}
}
/*
Find the closest one to c
@return 0: if the x1 is closer to c
1: if the x2 is closer to c
2: if both distance are equal
-1: if error
*/
/*
 * Return which of x1/x2 is nearer to c:
 *   0 — x1 is closer, 1 — x2 is closer, 2 — both equidistant.
 * Fix: the original ended with an unreachable `else return -1` — the three
 * comparisons (<, >, ==) already cover every case, so the documented -1
 * error value could never be produced; the dead branch is removed.
 */
int findNearest(int x1, int x2, int c) {
    int d1 = abs(x1 - c);
    int d2 = abs(x2 - c);
    if (d1 < d2)
        return 0;
    if (d1 > d2)
        return 1;
    return 2;
}
/*
 * Probability that the mapping described by MD string `md` (with `err`
 * total edits, strand `dir`) arose from sequencing error, computed from
 * the summed Phred qualities at each mismatch position of read
 * `readNumber`. Returns 1.0 for a perfect (err == 0) mapping.
 * Deleted bases have no read quality, so indels contribute a flat
 * per-event factor instead (0.0002 each, see below).
 */
double mapProb(int readNumber, char *md, int dir, int err){
int i = 0;
int mdlen = strlen(md);
char buf[MAX_CIGAR_SIZE];
int j = 0;
double phred = 0.0;
int errloc = 0;
int errcnt = 0; //since I cannot calculate deletion base quality
buf[0] = 0;
if (err == 0)
return 1.0;
while (i<mdlen){
/* accumulate a run of digits (match length) into buf */
if (isdigit(md[i])) buf[j++]=md[i++];
else if (isalpha(md[i])){
/* mismatch */
errcnt++;
buf[j] = '\0';
if (j != 0)
errloc += atoi(buf);
else if (i!=0)
errloc++;
j=0; buf[0]=0;
/* reverse-strand hits index the quality string from the far end */
if (dir)
phred += (double) (_msf_seqList[readNumber].qual[SEQ_LENGTH-errloc-1] - 33);
else
phred += (double) (_msf_seqList[readNumber].qual[errloc] - 33);
i++;
}
else if (md[i]=='^'){
/* insertion to the read / deletion from reference */
if (j!=0){
buf[j]=0;
errloc += atoi(buf);
buf[0] = 0;
}
j=0;
i++; /* pass ^ */
/* skip the run of deleted bases; loop exit on the first non-alpha
   (the terminating NUL stops it even at end of string) */
while (isalpha(md[i++])) j++;
errloc += j;
j = 0;
}
}
/* NOTE(review): mismatches found in the MD string may be fewer than
   `err` when indels are present; each unaccounted edit multiplies in a
   flat 0.0002 — confirm this matches the caller's error model */
double indel_prob = 1;
if (errcnt != err)
indel_prob = 0.0002 * (err - errcnt);
return pow(10, -1 * (phred / 10)) * indel_prob;
}
/*
 * SAM-style mapping quality for read `readNumber`: the probability of the
 * recorded best hit relative to the total probability (tprob) accumulated
 * over all of the read's hits, converted to a Phred value capped at 40.
 * When the best hit carries all of the probability mass the cap is
 * returned directly (also avoids log10(0)).
 * NOTE(review): if tprob were 0 while mapprob != 0 this divides by zero —
 * presumably tprob always includes mapprob by construction; confirm
 * against the accumulation in outputPairedEnd().
 */
int mapQ(int readNumber)
{
int mapqual;
double mapprob;
mapprob = mapProb(readNumber, bestHitMappingInfo[readNumber].md,
bestHitMappingInfo[readNumber].dir, bestHitMappingInfo[readNumber].err);
if (mapprob == bestHitMappingInfo[readNumber].tprob)
mapqual = 40;
else
mapqual = (int) (round(-10.0 * log10(1 - (mapprob / bestHitMappingInfo[readNumber].tprob))));
if (mapqual > 40) mapqual = 40;
return mapqual;
}
/*
 * Record one mate's mapping into the best-hit table slot `slot`.
 * strcpy is equivalent to the original strncpy(dst, src, strlen(src)+1):
 * both copy the string including its terminator, with no padding.
 */
static void copyBestHit(int slot, FullMappingInfo mi) {
    bestHitMappingInfo[slot].loc   = mi.loc;
    bestHitMappingInfo[slot].dir   = mi.dir;
    bestHitMappingInfo[slot].err   = mi.err;
    bestHitMappingInfo[slot].score = mi.score;
    snprintf(bestHitMappingInfo[slot].chr, MAX_REF_SIZE, "%s",
             _msf_refGenName);
    strcpy(bestHitMappingInfo[slot].md, mi.md);
    strcpy(bestHitMappingInfo[slot].cigar, mi.cigar);
}

/*
 * Store mi1/mi2 as the current best pair of mappings for read pair
 * `readNumber` (slots readNumber*2 and readNumber*2+1).
 */
void setPairFullMappingInfo(int readNumber, FullMappingInfo mi1,
        FullMappingInfo mi2) {
    copyBestHit(readNumber * 2, mi1);
    copyBestHit(readNumber * 2 + 1, mi2);
}
/*
 * Merge the per-thread temporary mapping files for the current contig,
 * pair up mate hits, emit concordant SAM records (or record best pairs in
 * bestMode), and in discordant mode stream discordant/OEA candidates to
 * the __disc/__oea temp files for post-processing.
 * Returns the running count of unmapped reads (pre_unmappedCnt plus the
 * reads with zero hits in this pass) and updates mappedSeqCnt.
 */
int outputPairedEnd(int pre_unmappedCnt) {
int i = 0;
char cigar[MAX_CIGAR_SIZE];
int tmpOut;
FILE* in1[_msf_openFiles];
FILE* in2[_msf_openFiles];
char fname1[_msf_openFiles][FILE_NAME_LENGTH];
char fname2[_msf_openFiles][FILE_NAME_LENGTH];
// discordant
FILE *out = NULL, *out1 = NULL;
char fname3[FILE_NAME_LENGTH];
char fname4[FILE_NAME_LENGTH];
int meanDistanceMapping = 0;
char rqual1[SEQ_LENGTH + 1];
char rqual2[SEQ_LENGTH + 1];
int tmp = 0;
SAM _msf_output;
OPT_FIELDS _msf_optionalFields[8];
//TODO
loadRefGenome(&_msf_refGen, &_msf_refGenName, &tmpOut);
/* discordant mode appends candidates to the __disc / __oea temp files */
if (pairedEndDiscordantMode) {
sprintf(fname3, "%s__%s__disc", mappingOutputPath, mappingOutput);
sprintf(fname4, "%s__%s__oea", mappingOutputPath, mappingOutput);
out = fileOpen(fname3, "a");
out1 = fileOpen(fname4, "a");
}
FullMappingInfo *mi1 = getMem(sizeof(FullMappingInfo) * _msf_maxLSize, "mi1 @outputPairedEnd()");
FullMappingInfo *mi2 = getMem(sizeof(FullMappingInfo) * _msf_maxRSize, "mi2 @outputPairedEnd()");
/* open every per-thread temp file pair (__1.tmp / __2.tmp) and remember
   their names for later cleanup */
_msf_fileCount[_msf_maxFile] = 0;
for (i = 0; i < _msf_openFiles; i++) {
sprintf(fname1[i], "%s__%s__%s__%d__1.tmp", mappingOutputPath,
_msf_refGenName, mappingOutput, i);
sprintf(_msf_fileName[_msf_maxFile][_msf_fileCount[_msf_maxFile]][0],
"%s", fname1[i]);
sprintf(fname2[i], "%s__%s__%s__%d__2.tmp", mappingOutputPath,
_msf_refGenName, mappingOutput, i);
sprintf(_msf_fileName[_msf_maxFile][_msf_fileCount[_msf_maxFile]][1],
"%s", fname2[i]);
in1[i] = fileOpen(fname1[i], "r");
in2[i] = fileOpen(fname2[i], "r");
_msf_fileCount[_msf_maxFile]++;
}
_msf_maxFile++;
int size;
int j, k;
int size1, size2;
/* midpoint of the accepted insert-size window, used to break ties
   between equally-good pairings */
meanDistanceMapping =
(pairedEndDiscordantMode == 1) ?
(minPairEndedDiscordantDistance
+ maxPairEndedDiscordantDistance) / 2 + SEQ_LENGTH :
(minPairEndedDistance + maxPairEndedDistance) / 2
+ SEQ_LENGTH;
for (i = 0; i < _msf_seqListSize / 2; i++) {
size1 = size2 = 0;
/* gather all hits for mate 1 of pair i from every temp file; a
   negative loc encodes the reverse strand */
for (j = 0; j < _msf_openFiles; j++) {
tmpOut = fread(&size, sizeof(int), 1, in1[j]);
if (size > 0) {
for (k = 0; k < size; k++) {
mi1[size1 + k].dir = 1;
tmpOut = fread(&(mi1[size1 + k].loc), sizeof(int), 1, in1[j]);
tmpOut = fread(&(mi1[size1 + k].err), sizeof(int), 1,in1[j]);
tmpOut = fread(&(mi1[size1 + k].cigarSize), sizeof(int), 1, in1[j]);
tmpOut = fread((mi1[size1 + k].cigar), sizeof(char), mi1[size1 + k].cigarSize, in1[j]);
mi1[size1 + k].cigar[mi1[size1 + k].cigarSize] = '\0';
tmpOut = fread(&(mi1[size1 + k].mdSize), sizeof(int), 1, in1[j]);
tmpOut = fread((mi1[size1 + k].md), sizeof(char), (mi1[size1 + k].mdSize), in1[j]);
mi1[size1 + k].md[mi1[size1 + k].mdSize] = '\0';
if (mi1[size1 + k].loc < 1) {
mi1[size1 + k].loc *= -1;
mi1[size1 + k].dir = -1;
}
}
qsort(mi1 + size1, size, sizeof(FullMappingInfo), compareOut);
size1 += size;
}
}
/* same for mate 2 */
for (j = 0; j < _msf_openFiles; j++) {
tmpOut = fread(&size, sizeof(int), 1, in2[j]);
if (size > 0) {
for (k = 0; k < size; k++) {
mi2[size2 + k].dir = 1;
tmpOut = fread(&(mi2[size2 + k].loc), sizeof(int), 1, in2[j]);
tmpOut = fread(&(mi2[size2 + k].err), sizeof(int), 1, in2[j]);
tmpOut = fread(&(mi2[size2 + k].cigarSize), sizeof(int), 1, in2[j]);
tmpOut = fread((mi2[size2 + k].cigar), sizeof(char), mi2[size2 + k].cigarSize, in2[j]);
mi2[size2 + k].cigar[mi2[size2 + k].cigarSize] = '\0';
tmpOut = fread(&(mi2[size2 + k].mdSize), sizeof(int), 1, in2[j]);
tmpOut = fread((mi2[size2 + k].md), sizeof(char), mi2[size2 + k].mdSize, in2[j]);
mi2[size2 + k].md[mi2[size2 + k].mdSize] = '\0';
if (mi2[size2 + k].loc < 1) {
mi2[size2 + k].loc *= -1;
mi2[size2 + k].dir = -1;
}
}
qsort(mi2 + size2, size, sizeof(FullMappingInfo), compareOut);
size2 += size;
}
}
int lm, ll, rl, rm;
int pos = 0;
if (pairedEndDiscordantMode) {
/* sweep: for each mate-1 hit, scan the sorted mate-2 hits inside the
   discordant window [lm, rm]; a same-strand pair outside [ll, rl]
   flags the pair as having a (possible) concordant mapping */
for (j = 0; j < size1; j++) {
lm = mi1[j].loc - maxPairEndedDiscordantDistance + 1;
ll = mi1[j].loc - minPairEndedDiscordantDistance + 1;
rl = mi1[j].loc + minPairEndedDiscordantDistance - 1;
rm = mi1[j].loc + maxPairEndedDiscordantDistance - 1;
while (pos < size2 && mi2[pos].loc < lm) {
pos++;
}
k = pos;
while (k < size2 && mi2[k].loc <= rm) {
if (mi2[k].loc <= ll || mi2[k].loc >= rl) {
if ( (mi1[j].dir == 1 && mi2[k].dir == 1)
|| (mi1[j].dir == -1 && mi2[k].dir == -1)) {
_msf_seqList[i * 2].hits[0] = 1;
_msf_seqList[i * 2 + 1].hits[0] = 1;
if (nosamMode != 0) {
size1 = 0;
size2 = 0;
}
break;
}
}
k++;
}
}
_msf_seqHits[i * 2] += size1;
_msf_seqHits[i * 2 + 1] += size2;
/* too many hit combinations: treat as mapped and stop processing */
if (_msf_seqHits[i * 2 + 1] * _msf_seqHits[i * 2]
> DISCORDANT_CUT_OFF && nosamMode != 0) {
_msf_seqList[i * 2].hits[0] = 1;
_msf_seqList[i * 2 + 1].hits[0] = 1;
size1 = 0;
size2 = 0;
}
int rNo = 0;
int loc = 0;
int err = 0;
float sc = 0;
char l = 0;
//write the OEA data
/* one-end-anchored: exactly one mate has hits; stream the mapped
   mate's records to the __oea file (capped at maxOEAOutput).
   NOTE(review): `char l` truncates reference names longer than 127
   chars — presumably never the case; confirm */
if (_msf_seqHits[i * 2] == 0){
for (k = 0; k < size2 && _msf_oeaMapping[i * 2 + 1] < maxOEAOutput; k++) {
rNo = i * 2 + 1;
loc = mi2[k].loc * mi2[k].dir;
err = mi2[k].err;
sc = mi2[k].score;
l = strlen(_msf_refGenName);
tmp = fwrite(&rNo, sizeof(int), 1, out1);
tmp = fwrite(&l, sizeof(char), 1, out1);
tmp = fwrite(_msf_refGenName, sizeof(char), l, out1);
tmp = fwrite(&loc, sizeof(int), 1, out1);
tmp = fwrite(&err, sizeof(int), 1, out1);
tmp = fwrite(&sc, sizeof(float), 1, out1);
if (mi2[k].cigarSize > SEQ_LENGTH || mi2[k].cigarSize <= 0)
printf("ERROR CIGAR size=%d %s\n", mi2[k].cigarSize, _msf_seqList[i * 2 + 1].seq);
tmp = fwrite(&(mi2[k].cigarSize), sizeof(int), 1, out1);
tmp = fwrite((mi2[k].cigar), sizeof(char), mi2[k].cigarSize, out1);
tmp = fwrite(&(mi2[k].mdSize), sizeof(int), 1, out1);
tmp = fwrite((mi2[k].md), sizeof(char), mi2[k].mdSize, out1);
_msf_oeaMapping[i * 2 + 1]++;
}
}
if (_msf_seqHits[i * 2 + 1] == 0){
for (j = 0; j < size1 && _msf_oeaMapping[i * 2] < maxOEAOutput; j++) {
rNo = i * 2;
loc = mi1[j].loc * mi1[j].dir;
err = mi1[j].err;
sc = mi1[j].score;
l = strlen(_msf_refGenName);
tmp = fwrite(&rNo, sizeof(int), 1, out1);
tmp = fwrite(&l, sizeof(char), 1, out1);
tmp = fwrite(_msf_refGenName, sizeof(char), l, out1);
tmp = fwrite(&loc, sizeof(int), 1, out1);
tmp = fwrite(&err, sizeof(int), 1, out1);
tmp = fwrite(&sc, sizeof(float), 1, out1);
if (mi1[j].cigarSize > SEQ_LENGTH || mi1[j].cigarSize <= 0)
printf("ERROR %d %s\n", mi1[j].cigarSize, _msf_seqList[i * 2 + 1].seq);
tmp = fwrite(&(mi1[j].cigarSize), sizeof(int), 1, out1);
tmp = fwrite((mi1[j].cigar), sizeof(char), mi1[j].cigarSize, out1);
tmp = fwrite(&(mi1[j].mdSize), sizeof(int), 1, out1);
tmp = fwrite((mi1[j].md), sizeof(char), mi1[j].mdSize, out1);
_msf_oeaMapping[i * 2]++;
}
}
}
char *seq1, *seq2, *rseq1, *rseq2, *qual1, *qual2;
rqual1[SEQ_LENGTH] = '\0';
rqual2[SEQ_LENGTH] = '\0';
rqual1[0] = '\0';
rqual2[0] = '\0';
seq1 = _msf_seqList[i * 2].seq;
rseq1 = _msf_seqList[i * 2].rseq;
qual1 = _msf_seqList[i * 2].qual;
strncpy(rqual1, _msf_seqList[i * 2].qual, SEQ_LENGTH);
seq2 = _msf_seqList[i * 2 + 1].seq;
rseq2 = _msf_seqList[i * 2 + 1].rseq;
qual2 = _msf_seqList[i * 2 + 1].qual;
strncpy(rqual2, _msf_seqList[i * 2 + 1].qual, SEQ_LENGTH);
/* discordant mode needs per-hit alignment scores for ranking */
if (pairedEndDiscordantMode) {
for (k = 0; k < size1; k++) {
mi1[k].score = calculateScore(mi1[k].loc,
(mi1[k].dir == -1) ? rseq1 : seq1,
(mi1[k].dir == -1) ? rqual1 : qual1, mi1[k].cigar);
}
for (k = 0; k < size2; k++) {
mi2[k].score = calculateScore(mi2[k].loc,
(mi2[k].dir == -1) ? rseq2 : seq2,
(mi2[k].dir == -1) ? rqual2 : qual2, mi2[k].cigar);
}
}
/* CALKAN MAPQ FOR PE */
/* accumulate each mate's total mapping probability (used by mapQ) */
for (j = 0; j < size1; j++) {
if (mi1[j].err != 0){
bestHitMappingInfo[i*2].tprob += mapProb(i*2, mi1[j].md, mi1[j].dir, mi1[j].err);
}
}
for (k = 0; k < size2; k++) {
if (mi2[k].err != 0){
bestHitMappingInfo[i*2+1].tprob += mapProb((i*2+1), mi2[k].md, mi2[k].dir, mi2[k].err);
}
}
if (pairedEndDiscordantMode) {
/* all-pairs pass: classify each mate-hit combination as possible
   concordant (keep the best by error count, ties broken by distance
   to the mean insert size) or discordant (stream to __disc file and
   track the best for reporting) */
for (j = 0; j < size1; j++) {
for (k = 0; k < size2; k++) {
int dir1 = mi1[j].dir;
int dir2 = mi2[k].dir;
int loc1 = mi1[j].loc;
int loc2 = mi2[k].loc;
int best_err1 = bestHitMappingInfo[i * 2].err;
int best_err2 = bestHitMappingInfo[i * 2+1].err;
int best_loc1 = bestHitMappingInfo[i * 2].loc;
int best_loc2 = bestHitMappingInfo[i * 2+1].loc;
if (
(( dir1 > 0 && dir2 > 0 ) || (dir1 < 0 && dir2 < 0)) &&
(( loc1 != -1 || loc2 != -1) &&
(abs(loc1 - loc2) > minPairEndedDiscordantDistance) &&
(abs(loc1 - loc2) < maxPairEndedDiscordantDistance))
) {
//POSSIBLE CONCORDANT
if(_msf_readHasConcordantMapping[i] == 0) {
setPairFullMappingInfo(i, mi1[j], mi2[k]);
_msf_readHasConcordantMapping[i] = 1;
_msf_seqList[i * 2].hits[0] = 1;
_msf_seqList[i * 2 + 1].hits[0] = 1;
} else {
if (best_err1+best_err2 >= mi1[j].err + mi2[k].err) {
if ( best_err1+best_err2 == mi1[j].err + mi2[k].err
&& findNearest(
abs(best_loc1-best_loc2),
abs(loc2-loc1),
meanDistanceMapping) == 0) {
continue;
}
setPairFullMappingInfo(i, mi1[j], mi2[k]);
}
}
}
//DISCORDANT TO TEMP FILE FOR POST PROCESSING
else if (_msf_readHasConcordantMapping[i] == 0
&& _msf_seqHits[i * 2] != 0
&& _msf_seqHits[i * 2 + 1] != 0) {
int rNo = i;
int loc = mi1[j].loc * mi1[j].dir;
int err = mi1[j].err;
float sc = mi1[j].score;
char l = strlen(_msf_refGenName);
if (_msf_discordantMapping[i * 2] < maxDiscordantOutput) {
tmp = fwrite(&rNo, sizeof(int), 1, out);
tmp = fwrite(&l, sizeof(char), 1, out);
tmp = fwrite(_msf_refGenName, sizeof(char), l, out);
tmp = fwrite(&loc, sizeof(int), 1, out);
tmp = fwrite(&err, sizeof(int), 1, out);
tmp = fwrite(&sc, sizeof(float), 1, out);
tmp = fwrite(&(mi1[j].cigarSize), sizeof(int), 1, out);
tmp = fwrite((mi1[j].cigar), sizeof(char), mi1[j].cigarSize, out);
tmp = fwrite(&(mi1[j].mdSize), sizeof(int), 1, out);
tmp = fwrite((mi1[j].md), sizeof(char), mi1[j].mdSize, out);
loc = mi2[k].loc * mi2[k].dir;
err = mi2[k].err;
sc = mi2[k].score;
tmp = fwrite(&loc, sizeof(int), 1, out);
tmp = fwrite(&err, sizeof(int), 1, out);
tmp = fwrite(&sc, sizeof(float), 1, out);
tmp = fwrite(&(mi2[k].cigarSize), sizeof(int), 1, out);
tmp = fwrite((mi2[k].cigar), sizeof(char), mi2[k].cigarSize, out);
tmp = fwrite(&(mi2[k].mdSize), sizeof(int), 1, out);
tmp = fwrite((mi2[k].md), sizeof(char), mi2[k].mdSize, out);
_msf_discordantMapping[i * 2]++;
}
//SET THE BEST DISCORDANT
//BEGIN {Farhad Hormozdiari}
if (best_loc1 == -1 && best_loc2 == -1 && _msf_readHasConcordantMapping[i] == 0) {
setPairFullMappingInfo(i, mi1[j], mi2[k]);
_msf_seqList[i * 2].hits[0] = 1;
_msf_seqList[i * 2 + 1].hits[0] = 1;
} else if (best_err1 + best_err2 >= mi1[j].err + mi2[k].err && _msf_readHasConcordantMapping[i] == 0) {
if (best_err1 + best_err2 == mi1[j].err + mi2[k].err
&& findNearest(
abs(best_loc2-best_loc1),
abs(loc1 - loc2),
meanDistanceMapping) == 0) {
continue;
}
setPairFullMappingInfo(i, mi1[j], mi2[k]);
}
//END {Farhad Hormozdiari}
}
}
}
} else {
/* standard paired-end mode: emit SAM records for every concordant
   combination (unless bestMode), tracking the best pair as above */
for (j = 0; j < size1; j++) {
for (k = 0; k < size2; k++) {
int dir1 = mi1[j].dir;
int dir2 = mi2[k].dir;
int loc1 = mi1[j].loc;
int loc2 = mi2[k].loc;
int best_err1 = bestHitMappingInfo[i * 2].err;
int best_err2 = bestHitMappingInfo[i * 2+1].err;
int best_loc1 = bestHitMappingInfo[i * 2].loc;
int best_loc2 = bestHitMappingInfo[i * 2+1].loc;
if ( abs (mi2[k].loc - mi1[j].loc) >= minPairEndedDistance
&& (abs(mi2[k].loc - mi1[j].loc) <= maxPairEndedDistance)
&& ( (dir1>0 && dir2>0) || ( dir1< 0 && dir2 <0) ) ) {
char *seq;
char *qual;
char d1;
char d2;
int isize;
int proper = 0;
// ISIZE CALCULATION
// The distance between outer edges
isize = abs(mi1[j].loc - mi2[k].loc) + SEQ_LENGTH - 2;
if (mi1[j].loc - mi2[k].loc > 0) {
isize *= -1;
}
d1 = (mi1[j].dir == -1) ? 1 : 0;
d2 = (mi2[k].dir == -1) ? 1 : 0;
//SET THE READ HAS CONCORDANT MAPPING
_msf_readHasConcordantMapping[i] = 1;
if (d1) {
seq = rseq1;
qual = rqual1;
} else {
seq = seq1;
qual = qual1;
}
if ((d1 && d2) || (!d1 && !d2)) {
proper = 2;
} else {
proper = 0;
}
_msf_output.POS = mi1[j].loc;
_msf_output.MPOS = mi2[k].loc;
_msf_output.FLAG = 1 + proper + 16 * d1 + 32 * d2 + 64;
_msf_output.ISIZE = isize;
_msf_output.SEQ = seq;
_msf_output.QUAL = qual;
_msf_output.QNAME = _msf_seqList[i * 2].name;
_msf_output.RNAME = _msf_refGenName;
_msf_output.MAPQ = 255;
_msf_output.CIGAR = cigar;
_msf_output.MRNAME = "=";
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = mi1[j].err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = mi1[j].md;
if (!bestMode)
output(_msf_output, 0);
if (d2) {
seq = rseq2;
qual = rqual2;
} else {
seq = seq2;
qual = qual2;
}
_msf_output.POS = mi2[k].loc;
_msf_output.MPOS = mi1[j].loc;
_msf_output.FLAG = 1 + proper + 16 * d2 + 32 * d1 + 128;
_msf_output.ISIZE = -isize;
_msf_output.SEQ = seq;
_msf_output.QUAL = qual;
_msf_output.QNAME = _msf_seqList[i * 2].name;
_msf_output.RNAME = _msf_refGenName;
_msf_output.MAPQ = 255;
_msf_output.CIGAR = cigar;
_msf_output.MRNAME = "=";
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = mi2[k].err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = mi2[k].md;
if (!bestMode)
output(_msf_output,0);
//SET THE BEST CONCORDANT
//BEGIN {Farhad Hormozdiari}
if (best_loc1 == -1 && best_loc2 == -1) {
setPairFullMappingInfo(i, mi1[j], mi2[k]);
} else {
if (best_err1 + best_err2 >= mi1[j].err + mi2[k].err) {
if (best_err1+best_err2 == mi1[j].err + mi2[k].err
&& findNearest(
abs(best_loc2 - best_loc1),
abs(loc2 - loc1),
meanDistanceMapping) == 0) {
continue;
}
setPairFullMappingInfo(i, mi1[j], mi2[k]);
}
} //END {Farhad Hormozdiari}
}
}
}
}
}
/* cleanup: close output streams, close and delete the per-thread temps */
if (pairedEndDiscordantMode) {
fclose(out);
fclose(out1);
}
for (i = 0; i < _msf_openFiles; i++) {
fclose(in1[i]);
fclose(in2[i]);
unlink(fname1[i]);
unlink(fname2[i]);
}
tmp++;
freeMem(mi1, sizeof(FullMappingInfo) * _msf_maxLSize, "mi1 @outputPairedEnd()");
freeMem(mi2, sizeof(FullMappingInfo) * _msf_maxRSize, "mi2 @outputPairedEnd()");
_msf_openFiles = 0;
/* calkan counter */
int unmappedCnt = 0;
for (i = 0; i < _msf_seqListSize; i++) {
if (_msf_seqHits[i] == 0) unmappedCnt++;
}
unmappedCnt = unmappedCnt + pre_unmappedCnt;
mappedSeqCnt = _msf_totalSeqListSize - unmappedCnt;
return unmappedCnt;
}
/*
 * Alignment score of `seq` against the reference at 1-based position
 * `index`, guided by the expanded edit string `md` (runs like "10M2I3D").
 * Mismatched match-columns multiply in the base-call error probability
 * derived from `qual`; each inserted/deleted base multiplies in a flat
 * 0.0003 (indel rate + sequencer indel error rate).
 * Fix: strlen(md) was re-evaluated on every loop iteration (O(n^2) over
 * the edit string); it is now hoisted.
 */
float calculateScore(int index, char *seq, char *qual, char *md) {
    int i = 0;
    int j;
    char *ref;
    char *ver;
    float score = 1;
    char tmp[2 * SEQ_MAX_LENGTH];   /* md expanded to one char per column */
    int value = 0;
    int end = 0;
    int index1 = 0;
    int index2 = 0;
    int mdLen = (int) strlen(md);   /* hoisted out of the loop condition */
    ref = _msf_refGen + index - 1;  /* index is 1-based into the reference */
    ver = seq;
    /* expand "<count><op>" runs into tmp, e.g. "3M" -> "MMM" */
    while (i < mdLen) {
        index1 = i;
        while (md[i] >= '0' && md[i] <= '9') {
            i++;
        }
        index2 = i;
        value = str2int(md, index1, index2);
        if (md[i] == 'M') {
            for (j = 0; j < value; j++) {
                tmp[end] = 'M';
                end++;
            }
        } else if (md[i] == 'I') {
            for (j = 0; j < value; j++) {
                tmp[end] = 'I';
                end++;
            }
        } else if (md[i] == 'D') {
            for (j = 0; j < value; j++) {
                tmp[end] = 'D';
                end++;
            }
        }
        i++;
    }
    tmp[end] = '\0';
    /* walk reference and read in lock-step, scoring each column */
    j = 0;
    for (i = 0; i < end; i++) {
        if (tmp[i] == 'M') {
            if (*ref != *ver) {
                /* mismatch: error prob from the Phred quality (+ floor) */
                score *= 0.001 + 1 / pow(10, ((qual[j] - 33) / 10.0));
            }
            ref++;
            ver++;
            j++;
        } else if (tmp[i] == 'I') {
            ver++;
            j++;
            score *= 0.0003; // 0.0001 + 0.0002; 0.0001: indel rate in normal human, 0.0002: indel error rate in Illumina
        } else if (tmp[i] == 'D') {
            ref++;
            score *= 0.0003; // 0.0001 + 0.0002
        }
    }
    return score;
}
/*
 * atoi() of the substring str[start..end). A degenerate span (end <= start)
 * yields 0, exactly as atoi("") does.
 */
int matoi(char *str, int start, int end) {
    int len = end - start;
    if (len < 0)
        len = 0;
    char digits[len + 1];
    memcpy(digits, str + start, (size_t) len);
    digits[len] = '\0';
    return atoi(digits);
}
/*
 * Expand a CIGAR string ("2M1I1D") into a per-column matrix ("MMID").
 * Fix: the original digit scan tested cigar[i] BEFORE checking
 * i < cigar_size, reading one char past the declared length; the bound is
 * now checked first. The digit run is accumulated in place, removing the
 * extra buffer copy matoi() performed.
 */
void convertCigarToMatrix(char *cigar, int cigar_size, char *matrix) {
    int i = 0;
    int j = 0;
    int size = 0;
    matrix[0] = '\0';
    while (i < cigar_size) {
        if (cigar[i] >= '0' && cigar[i] <= '9') {
            int value = 0;
            /* bounds check first, then dereference */
            while (i < cigar_size && cigar[i] >= '0' && cigar[i] <= '9') {
                value = value * 10 + (cigar[i] - '0');
                i++;
            }
            /* cigar[i] is now the operation letter for this run */
            for (j = 0; j < value; j++) {
                if (cigar[i] == 'M')
                    matrix[size] = 'M';
                else if (cigar[i] == 'D')
                    matrix[size] = 'D';
                else if (cigar[i] == 'I')
                    matrix[size] = 'I';
                /* NOTE(review): an unknown op letter still advances size,
                   leaving that column unwritten — preserved from the
                   original; confirm callers only pass M/D/I */
                size++;
            }
        }
        i++;
    }
    matrix[size] = '\0';
}
/*
 * Expand a SAM MD tag into a per-column matrix: each digit run becomes
 * that many 'M' columns, '^' becomes a 'D' marker, and any other
 * character (a mismatched/deleted base) is copied through verbatim,
 * e.g. "10A5" -> "MMMMMMMMMMAMMMMM".
 * Fix: the original digit scan dereferenced md[i] before checking
 * i < md_size; the bound is now checked first. The digit run is
 * accumulated in place, removing matoi()'s extra buffer copy.
 */
void convertMDToMatrix(char *md, int md_size, char *matrix) {
    int i = 0;
    int j = 0;
    int size = 0;
    matrix[0] = '\0';
    while (i < md_size) {
        if (md[i] >= '0' && md[i] <= '9') {
            int value = 0;
            /* bounds check first, then dereference */
            while (i < md_size && md[i] >= '0' && md[i] <= '9') {
                value = value * 10 + (md[i] - '0');
                i++;
            }
            for (j = 0; j < value; j++) {
                matrix[size] = 'M';
                size++;
            }
            i--; /* step back so the outer i++ lands on the char after the digits */
        } else if (md[i] == '^') {
            matrix[size] = 'D';
            size++;
        } else {
            matrix[size] = md[i];
            size++;
        }
        i++;
    }
    matrix[size] = '\0';
}
/*
 * Merge the expanded CIGAR (tmp1) and expanded MD (tmp2) matrices into a
 * single per-column matrix: CIGAR supplies the column structure (M/D/I),
 * MD supplies the mismatched/deleted base identities.
 * Fix: strlen(tmp1)/strlen(tmp2) were re-evaluated inside the loop
 * conditions on every iteration (O(n^2)); both are now computed once.
 */
void convertMDCigarToMatrix(char *cigar, int cigar_size, char *md, int md_size,
        char *matrix) {
    int i = 0;
    int j = 0;
    int size = 0;
    char tmp1[SEQ_MAX_LENGTH];
    char tmp2[SEQ_MAX_LENGTH];
    convertMDToMatrix(md, md_size, tmp2);
    convertCigarToMatrix(cigar, cigar_size, tmp1);
    int len1 = strlen(tmp1);  /* hoisted: tmp1/tmp2 do not change below */
    int len2 = strlen(tmp2);
    while (i < len1) {
        if (tmp1[i] == 'M') {
            /* inside MD range: use MD's verdict (match or mismatch base);
               past its end: default to a match column */
            if (j < len2) {
                if (tmp2[j] == 'M') {
                    matrix[size] = 'M';
                    size++;
                }
                if (tmp2[j] != 'M') {
                    matrix[size] = tmp2[j];
                    size++;
                }
            } else {
                matrix[size] = 'M';
                size++;
            }
        } else if (tmp1[i] == 'D') {
            /* deletion column followed by the deleted base from MD.
               NOTE(review): tmp2[j] is read after j++ with no bound
               check — presumably MD always supplies the base here;
               confirm against the MD/CIGAR producers */
            matrix[size] = 'D';
            size++;
            j++;
            matrix[size] = tmp2[j];
            size++;
        } else if (tmp1[i] == 'I') {
            matrix[size] = 'I';
            size++;
        }
        i++;
        if (j < len2)
            j++;
    }
    if (len1)
        matrix[size] = '\0';
}
/*
 * Annotate a merged alignment matrix with read bases: after every 'D'
 * and 'I' column the corresponding base from `seq` is inserted, e.g.
 * in_matrix "MIM" with seq "ACG" -> "MICM".
 * Fix: strlen(in_matrix) was re-evaluated in the loop condition on every
 * iteration (O(n^2)); it is now computed once.
 */
void convertInsertion(char * in_matrix, char * seq, char *out_matrix) {
    int i = 0;
    int j = 0;   /* cursor into seq */
    int size = 0;
    int len = strlen(in_matrix);  /* hoisted: in_matrix is not modified */
    while (i < len) {
        if (in_matrix[i] == 'M') {
            out_matrix[size] = 'M';
            size++;
            j++;
        } else if (in_matrix[i] == 'D') {
            /* 'D' consumes the following matrix column and emits the
               read base after it (preserved from the original) */
            out_matrix[size] = 'D';
            size++;
            i++;
            j++;
            out_matrix[size] = seq[j];
            j++;
            size++;
        } else if (in_matrix[i] == 'I') {
            out_matrix[size] = 'I';
            size++;
            out_matrix[size] = seq[j];
            size++;
            j++;
        } else {
            /* mismatch base column: copy through */
            out_matrix[size] = in_matrix[i];
            size++;
            j++;
        }
        i++;
    }
    out_matrix[size] = '\0';
}
/*
 * Open the DIVET output file ("<path><name>_DIVET.vh") for writing and
 * return the stream; the caller closes it via finalizePairedEndDiscPP().
 */
FILE * initPairedEndDiscPP() {
    char divetPath[FILE_NAME_LENGTH];
    sprintf(divetPath, "%s%s_DIVET.vh", mappingOutputPath, mappingOutput);
    return fileOpen(divetPath, "w");
}
/* Close the DIVET stream opened by initPairedEndDiscPP(). */
void finalizePairedEndDiscPP(FILE *divetOut) {
    fclose(divetOut);
}
/*
 * Post-process discordant paired-end mappings: read the binary
 * "__<output>__disc" record stream produced during mapping, rebuild each
 * mate's alignment matrix from its CIGAR + MD strings, classify the pair
 * event ('V' inversion, 'E' everted, 'D' deletion, 'I' insertion) and write
 * one DIVET line per mapping to `out`.
 *
 * Record layout (one per mapping): readNo, nameLen + name, then per mate:
 * loc (negative = reverse strand), err, score, cigarLen + cigar,
 * mdLen + md.
 *
 * Fixes: `tmp` is initialized (it was read via `tmp++` even when the loop
 * never ran -- uninitialized read), and a failed fileOpen now returns early
 * instead of falling through to fclose(NULL), which is undefined behavior.
 */
void operatePairedEndDiscPP(FILE * out) {
	char tmp_matrix1[SEQ_MAX_LENGTH];
	char tmp_matrix2[SEQ_MAX_LENGTH];
	char matrix1[SEQ_MAX_LENGTH];
	char matrix2[SEQ_MAX_LENGTH];
	char cigar1[MAX_CIGAR_SIZE];
	char editString1[2 * SEQ_MAX_LENGTH];
	char cigar2[MAX_CIGAR_SIZE];
	char editString2[2 * SEQ_MAX_LENGTH];
	char seq1[SEQ_LENGTH + 1];
	char seq2[SEQ_LENGTH + 1];
	char genName[SEQ_LENGTH];
	char fname1[FILE_NAME_LENGTH];
	char l;
	int l_size;
	int loc1, loc2;
	int err1, err2;
	char dir1, dir2;
	float sc1, sc2, lsc = 0;
	int flag = 0;
	int rNo, lrNo = -1;
	int tmp = 0;	/* FIX: was uninitialized; read by tmp++ below */
	FILE *in;
	sprintf(fname1, "%s__%s__disc", mappingOutputPath, mappingOutput);
	in = fileOpen(fname1, "r");
	if (in == NULL) {
		/* FIX: nothing to post-process; previously fell through to
		   fclose(NULL) (undefined behavior) */
		return;
	}
	flag = fread(&rNo, sizeof(int), 1, in);
	seq1[SEQ_LENGTH] = '\0';
	seq2[SEQ_LENGTH] = '\0';
	while (flag) {
		/* read one binary record: name, then loc/err/score/cigar/MD per mate */
		tmp = fread(&l, sizeof(char), 1, in);
		tmp = fread(genName, sizeof(char), l, in);
		genName[(int) l] = '\0';
		tmp = fread(&loc1, sizeof(int), 1, in);
		tmp = fread(&err1, sizeof(int), 1, in);
		tmp = fread(&sc1, sizeof(float), 1, in);
		tmp = fread(&l_size, sizeof(int), 1, in);
		tmp = fread(cigar1, sizeof(char), l_size, in);
		cigar1[(int) l_size] = '\0';
		tmp = fread(&l_size, sizeof(int), 1, in);
		tmp = fread(editString1, sizeof(char), l_size, in);
		editString1[(int) l_size] = '\0';
		tmp = fread(&loc2, sizeof(int), 1, in);
		tmp = fread(&err2, sizeof(int), 1, in);
		tmp = fread(&sc2, sizeof(float), 1, in);
		tmp = fread(&l_size, sizeof(int), 1, in);
		tmp = fread(cigar2, sizeof(char), l_size, in);
		cigar2[(int) l_size] = '\0';
		tmp = fread(&l_size, sizeof(int), 1, in);
		tmp = fread(editString2, sizeof(char), l_size, in);
		editString2[(int) l_size] = '\0';
		convertMDCigarToMatrix(cigar1, strlen(cigar1), editString1,
				strlen(editString1), tmp_matrix1);
		convertMDCigarToMatrix(cigar2, strlen(cigar2), editString2,
				strlen(editString2), tmp_matrix2);
		/* CHECK FOR SIFAST */
		/* CALKAN: GO OVER THIS VERY CAREFULLY FOR PE vs MP */
		/* only pairs with no concordant mapping and below the output cap */
		if (_msf_readHasConcordantMapping[rNo] == 0 && _msf_discordantMapping[rNo * 2] < maxDiscordantOutput ) {
			dir1 = dir2 = 'F';
			strncpy(seq1, _msf_seqList[rNo * 2].seq, SEQ_LENGTH);
			strncpy(seq2, _msf_seqList[rNo * 2 + 1].seq, SEQ_LENGTH);
			/* negative loc encodes a reverse-strand hit */
			if (loc1 < 0) {
				dir1 = 'R';
				loc1 = -loc1;
				strncpy(seq1, _msf_seqList[rNo * 2].rseq, SEQ_LENGTH);
			}
			if (loc2 < 0) {
				dir2 = 'R';
				loc2 = -loc2;
				strncpy(seq2, _msf_seqList[rNo * 2 + 1].rseq, SEQ_LENGTH);
			}
			convertInsertion(tmp_matrix1, seq1, matrix1);
			convertInsertion(tmp_matrix2, seq2, matrix2);
			/* average phred score of the pair, computed once per read */
			if (rNo != lrNo) {
				int j;
				for (j = 0; j < SEQ_LENGTH; j++) {
					lsc += _msf_seqList[rNo * 2].qual[j]
						+ _msf_seqList[rNo * 2 + 1].qual[j];
				}
				lsc /= 2 * SEQ_LENGTH;
				lsc -= 33;	/* phred+33 offset */
				lrNo = rNo;
			}
			/* classify the discordant event from strands and ordering */
			char event = '\0';
			if (dir1 == dir2) {
				event = 'V';	/* same strand: inversion */
			}
			else {
				if (pairedEndModePE && loc1 < loc2 && dir1 == 'R' && dir2 == 'F')
					event = 'E';
				else if (pairedEndModeMP && loc1 < loc2 && dir1 == 'F' && dir2 == 'R')
					event = 'E';
				else if (pairedEndModePE && loc2 < loc1 && dir1 == 'F' && dir2 == 'R')
					event = 'E';
				else if (pairedEndModeMP && loc2 < loc1 && dir1 == 'R' && dir2 == 'F')
					event = 'E';
				else if (abs(loc2 - loc1) >= maxPairEndedDiscordantDistance)
					event = 'D';
				else
					event = 'I';
			}
			_msf_seqList[rNo * 2].hits[0] = 2;
			fprintf(out,
					"%s\t%s\t%d\t%d\t%c\t=\t%d\t%d\t%c\t%c\t%d\t%0.0f\t%e\n",
					_msf_seqList[rNo * 2].name, genName, loc1,
					(loc1 + SEQ_LENGTH - 1), dir1, loc2,
					(loc2 + SEQ_LENGTH - 1), dir2, event, (err1 + err2),
					lsc, sc1 * sc2);
		}
		flag = fread(&rNo, sizeof(int), 1, in);
	}
	tmp++;	/* silences unused-result warnings on the freads above */
	fclose(in);
	unlink(fname1);
}
/*
 * Open (truncating) the "<output>_OEA.sam" file for one-end-anchored reads
 * and return its stream. `fileName` is unused but kept for interface
 * compatibility with callers.
 */
FILE * initOEAReads(char *fileName) {
	char path[FILE_NAME_LENGTH];
	sprintf(path, "%s%s_OEA.sam", mappingOutputPath, mappingOutput);
	return fileOpen(path, "w");
}
/*
 * Close the OEA SAM output stream opened by initOEAReads().
 * Fix: guard against a NULL stream -- fclose(NULL) is undefined behavior.
 */
void finalizeOEAReads(FILE * fp_out1) {
	if (fp_out1 != NULL)
		fclose(fp_out1);
	return;
}
/*
 * Emit one-end-anchored (OEA) reads as SAM records: read the binary
 * "__<output>__oea" stream, and for every read whose mate is unmapped
 * (and under the maxOEAOutput cap) write a SAM line carrying the mate's
 * sequence/quality in the custom NS/NQ optional fields.
 *
 * Fixes:
 *  - `d` was declared char; it holds the strand sign -1/+1 used in the
 *    FLAG arithmetic (32*d / 16*d). On ABIs where plain char is unsigned,
 *    -1 became 255 and corrupted the SAM FLAG. It is now an int.
 *  - when the input stream cannot be opened (or pairedEndDiscordantMode is
 *    off) the function previously fell through to fclose(NULL) and
 *    unlink(fname2) with fname2 UNINITIALIZED -- both undefined behavior.
 *    It now returns early.
 */
void operateOEAReads(FILE * fp_out1) {
	FILE * in;
	char genName[SEQ_LENGTH];
	char fname2[FILE_NAME_LENGTH];
	char l = 0;
	int loc1 = 0;
	int err1;
	int d;	/* FIX: strand sign -1/+1; was char (sign trouble in FLAG math) */
	float sc1 = 0;
	int flag = 0;
	int rNo = -1;
	int tmp = 0;
	int cigarSize = 0;
	int mdSize = 0;
	char cigar[SEQ_LENGTH + 1];
	char md[SEQ_LENGTH + 1];
	char *seq1, *seq2, *qual1, *qual2;
	char rqual1[SEQ_LENGTH + 1];
	SAM _msf_output;
	OPT_FIELDS _msf_optionalFields[8];
	seq1 = NULL;
	seq2 = NULL;
	qual1 = NULL;
	qual2 = NULL;
	rqual1[0] = '\0';
	SAMheaderTX(fp_out1, 0);
	in = NULL;
	if (pairedEndDiscordantMode) {
		sprintf(fname2, "%s__%s__oea", mappingOutputPath, mappingOutput);
		in = fileOpen(fname2, "r");
	}
	if (in == NULL) {
		/* FIX: previously fell through to fclose(NULL) and unlink of an
		   uninitialized fname2 -- both undefined behavior */
		return;
	}
	flag = fread(&rNo, sizeof(int), 1, in);
	while (flag) {
		cigar[0] = '\0';
		md[0] = '\0';
		/* one binary record: name, loc/err/score, cigar, MD */
		tmp = fread(&l, sizeof(char), 1, in);
		tmp = fread(genName, sizeof(char), l, in);
		genName[(int) l] = '\0';
		tmp = fread(&loc1, sizeof(int), 1, in);
		tmp = fread(&err1, sizeof(int), 1, in);
		tmp = fread(&sc1, sizeof(float), 1, in);
		tmp = fread(&cigarSize, sizeof(int), 1, in);
		tmp = fread(cigar, sizeof(char), cigarSize, in);
		cigar[cigarSize] = '\0';
		tmp = fread(&mdSize, sizeof(int), 1, in);
		tmp = fread(md, sizeof(char), mdSize, in);
		md[mdSize] = '\0';
		d = 1;
		/* negative loc encodes a reverse-strand hit */
		if (loc1 < 0) {
			d = -1;
			loc1 *= -1;
			seq1 = _msf_seqList[rNo].rseq;
			reverse(_msf_seqList[rNo].qual, rqual1, SEQ_LENGTH);
			rqual1[SEQ_LENGTH] = '\0';
			qual1 = rqual1;
		} else {
			seq1 = _msf_seqList[rNo].seq;
			qual1 = _msf_seqList[rNo].qual;
		}
		/* mate of rNo is rNo+1 for even read numbers, rNo-1 for odd */
		if (rNo % 2 == 0) {
			seq2 = _msf_seqList[rNo + 1].seq;
			qual2 = _msf_seqList[rNo + 1].qual;
		} else {
			seq2 = _msf_seqList[rNo - 1].seq;
			qual2 = _msf_seqList[rNo - 1].qual;
		}
		/* read mapped, mate unmapped, and under the OEA output cap */
		if (_msf_seqHits[rNo] != 0 && _msf_seqHits[rNo] < maxOEAOutput
				&& _msf_seqHits[(rNo % 2 == 0) ? rNo + 1 : rNo - 1] == 0) {
			_msf_output.POS = loc1;
			_msf_output.MPOS = 0;
			_msf_output.FLAG =
				(rNo % 2 == 0) ? 1 + 4 + 32 * d + 128 : 1 + 8 + 16 * d + 64;
			_msf_output.ISIZE = 0;
			_msf_output.SEQ = seq1;
			_msf_output.QUAL = qual1;
			_msf_output.QNAME = _msf_seqList[rNo].name;
			_msf_output.RNAME = genName;
			_msf_output.MAPQ = 255;
			_msf_output.CIGAR = cigar;
			_msf_output.MRNAME = "=";
			_msf_output.optSize = 4;
			_msf_output.optFields = _msf_optionalFields;
			_msf_optionalFields[0].tag = "NM";
			_msf_optionalFields[0].type = 'i';
			_msf_optionalFields[0].iVal = err1;
			_msf_optionalFields[1].tag = "MD";
			_msf_optionalFields[1].type = 'Z';
			_msf_optionalFields[1].sVal = md;
			//for the OEA reads
			_msf_optionalFields[2].tag = "NS";
			_msf_optionalFields[2].type = 'Z';
			_msf_optionalFields[2].sVal = seq2;
			_msf_optionalFields[3].tag = "NQ";
			_msf_optionalFields[3].type = 'Z';
			_msf_optionalFields[3].sVal = qual2;
			outputSAM(fp_out1, _msf_output);
			_msf_seqList[rNo].hits[0] = -1;
			_msf_seqList[(rNo % 2 == 0) ? rNo + 1 : rNo - 1].hits[0] = -1;
		}
		else if(_msf_seqHits[rNo] != 0 && _msf_seqHits[(rNo % 2 == 0) ? rNo + 1 : rNo - 1] == 0)
		{
			/* over the cap: still mark the pair as handled */
			_msf_seqList[rNo].hits[0] = -1;
			_msf_seqList[(rNo % 2 == 0) ? rNo + 1 : rNo - 1].hits[0] = -1;
		}
		flag = fread(&rNo, sizeof(int), 1, in);
	}
	tmp++;	/* silences unused-result warnings on the freads above */
	fclose(in);
	unlink(fname2);
}
/*
 * Transchromosomal output is disabled until the feature is completed: the
 * function intentionally returns immediately, keeping the draft
 * implementation below as a comment.
 *
 * NOTE(review): in the commented-out draft, fileOpen(fname1, "w") is called
 * BEFORE sprintf fills fname1 (uninitialized buffer) -- fix that ordering
 * before re-enabling.
 */
void outputAllTransChromosomal(int flag) {
	return;
	/* disabled until completed
	int i = 0;
	int j = 0;
	int k = 0;
	int l = 0;
	FILE *fp_out = NULL;
	char fname1[FILE_NAME_LENGTH];
	if(flag)
	{
		fp_out = fileOpen(fname1, "w");
		sprintf(fname1, "%s%s_TRANSCHROMOSOMAL", mappingOutputPath, mappingOutput);
		i = 0;
		for(j = i+1; j < _msf_maxFile; j++)
		{
			if(i != j)
			{
				for(k = 0; k < _msf_fileCount[i]; k++)
				{
					for(l = 0; l < _msf_fileCount[j]; l++)
					{
						outputTransChromosomal(_msf_fileName[i][k][0], _msf_fileName[j][l][1], fp_out);
					}// for l
				}// for k
			}// if
		}// for j
	}
	for(i = 0; i < _msf_maxFile; i++)
	{
		for(j = 0; j < _msf_fileCount[i]; j++)
		{
			unlink(_msf_fileName[i][j][0]);
			unlink(_msf_fileName[i][j][1]);
		}
	}
	if(flag)
		fclose(fp_out);
	*/
}
/*
 * Per-contig (and partly one-time) initialization for the Complete Genomics
 * mapping path: installs the read list, (re)allocates per-thread
 * verified-location tables sized to the current reference contig, and sets
 * the mappable window [_msf_refGenBeg, _msf_refGenEnd].
 *
 * The statement order matters: the old _msf_verifiedLocs tables must be
 * freed with the OLD _msf_refGenLength before getRefGenome() refreshes it.
 */
void initFASTCG(Read *seqList, int seqListSize, char *genFileName, int AccReads, int first_try) {
	// Optional Field Memory Allocation: _msf_optionalFields
	int i, j;
	/* one-time setup, keyed on _msf_samplingLocsEnds being unallocated */
	if (_msf_samplingLocsEnds == NULL) { // DHL FIRE OK
		_msf_samplingLocsEnds = getMem(1, "_msf_samplingLocsEnds @initFASTCG()");
		_msf_seqList = seqList;
		_msf_seqListSize = seqListSize;
		_msf_totalSeqListSize = AccReads;
		preProcessReads();
		_msf_oeaMapping = getMem(_msf_seqListSize * sizeof(int), "_msf_oeaMapping @initFASTCG()");
		for (i = 0; i < _msf_seqListSize; i++)
			_msf_oeaMapping[i] = 0;
		_msf_discordantMapping = getMem(_msf_seqListSize * sizeof(int), "_msf_discordantMapping @initFASTCG()");
		for (i = 0; i < _msf_seqListSize; i++)
			_msf_discordantMapping[i] = 0;
	}
	// Reference Genome Name Update
	if (_msf_refGenName == NULL) { // DHL FIRE OK
		_msf_refGenName = getMem(4*SEQ_LENGTH, "_msf_refGenName @initFASTCG()");
	}
	/* free the previous contig's tables using the OLD _msf_refGenLength */
	if (_msf_verifiedLocs != NULL) {
		for (i=0;i<number_of_threads;i++)
			if (_msf_verifiedLocs[i] != NULL)
				freeMem(_msf_verifiedLocs[i], sizeof(int) * (_msf_refGenLength+1), "_msf_verifiedLocs[i] @initFASTCG()");
		freeMem(_msf_verifiedLocs, sizeof(int *) * number_of_threads, "_msf_verifiedLocs @initFASTCG()");
	}
	/* refresh the reference contig and its metadata */
	_msf_refGen = getRefGenome(); // DHL VERIFY
	_msf_refGenLength = strlen(_msf_refGen);
	_msf_refGenOffset = getRefGenomeOffset();
	snprintf(_msf_refGenName, 4*SEQ_LENGTH,"%s%c", getRefGenomeName(), '\0');
	_msf_refGenName[strlen(getRefGenomeName())] = '\0';
	/* fresh per-thread verified-location tables, filled with a sentinel
	   readId that no real read can produce (ids are < 2*seqListSize) */
	_msf_verifiedLocs = (int **) getMem(sizeof(int *) * number_of_threads, "_msf_verifiedLocs @initFASTCG()");
	for (i=0;i<number_of_threads;i++)
		_msf_verifiedLocs[i] = (int *) getMem(sizeof(int) * (_msf_refGenLength+1), "_msf_verifiedLocs[i] @initFASTCG()");
	for (i=0;i<number_of_threads;i++){
		for (j=0; j<=_msf_refGenLength; j++) {
			_msf_verifiedLocs[i][j] = _msf_seqListSize*10+1;
		}
	}
	/* paired-end bookkeeping, allocated once */
	if (pairedEndMode && _msf_seqHits == NULL) {
		_msf_mappingInfo = getMem(seqListSize * sizeof (MappingInfo), "_msf_mappingInfo @initFASTCG()");
		for (i=0; i<seqListSize; i++) {
			_msf_mappingInfo[i].next = NULL;
			_msf_mappingInfo[i].size = 0;
		}
		_msf_seqHits = getMem((_msf_seqListSize) * sizeof(int), "_msf_seqHits @initFASTCG()");
		for (i=0; i<_msf_seqListSize; i++) {
			_msf_seqHits[i] = 0;
		}
		_msf_readHasConcordantMapping = getMem(_msf_seqListSize / 2 * sizeof(char), "_msf_readHasConcordantMapping @initFASTCG()");
		for(i = 0; i < _msf_seqListSize/2; i++) {
			_msf_readHasConcordantMapping[i] = 0;
		}
		if (first_try)
			initLoadingRefGenome(genFileName);
	}
	/* mappable window: skip the contig-overlap prefix on non-first chunks */
	if (_msf_refGenOffset == 0) { // DHL FIRE
		_msf_refGenBeg = 1;
	}
	else {
		_msf_refGenBeg = CONTIG_OVERLAP - SEQ_LENGTH + 2;
	}
	_msf_refGenEnd = _msf_refGenLength - SEQ_LENGTH + 1;
}
/*
 * Map all paired-end CG reads against the current reference contig, then
 * flush the accumulated per-read mapping lists to the two per-mate binary
 * temp files ("__..__1.tmp" / "__..__2.tmp").
 *
 * Four passes, one per (sequence, key orientation) combination; each hashes
 * the TEST_KEY_NUM anchor keys of the read, looks up candidate locations
 * and hands them to the forward/backward verifier:
 *   1. seq  with forward keys (5-10-10-10), direction 0
 *   2. rseq with forward keys, direction 1
 *   3. rseq with reverse keys (10-10-10-5), direction 1
 *   4. seq  with reverse keys, direction 0
 */
void mapPairedEndSeqCG(Read *seqList, unsigned int seqListSize, unsigned int AccReads) {
	_msf_totalSeqListSize = AccReads;
	_msf_seqListSize = seqListSize;
	_msf_seqList = seqList;
	int i = 0;
	int j = 0;
	int k = 0;
	int m = 0;
	int tid = 0;
	int key_hash[TEST_KEY_NUM];
	unsigned int *locs = NULL;
	key_struct* key_input = getMem(TEST_KEY_NUM*sizeof(key_struct), "key_input @mapPairedEndSeqCG()");
	// Pass 1: forward sequence, forward keys
	for(i = 0; i < TEST_KEY_NUM; i++) {
		key_input[i].key_entry = NULL;
		key_input[i].key_locs = NULL;
		key_input[i].key_number = 0;
		key_input[i].key_entry_size = 0;
		key_hash[i] = 0;
	}
	for(i = 0; i < _msf_seqListSize; i++) {
		k = _msf_sort_seqList[i].readNumber;
		for (m = 0; m < TEST_KEY_NUM; m++) {
			key_hash[m] = hashVal(_msf_seqList[k].seq + m*10 + 5); // forward 5-10-10-10
			locs = getCandidates(key_hash[m]);
			key_input[m].key_number = m;
			key_input[m].key_entry = locs;
			key_input[m].key_locs = locs + 1;
			/* locs[0] holds the candidate count; -1 marks "no entry" */
			if (locs != NULL)
				key_input[m].key_entry_size = locs[0];
			else
				key_input[m].key_entry_size = -1;
		}
		for (j = 0; j < TEST_KEY_NUM - 1; j++) {
			if (key_input[j].key_entry_size > 0) {
				mapPairEndSeqCG_forward(
					key_input[j].key_locs, // l1
					key_input[j].key_entry_size,// s1
					k, // readNumber
					key_input[j].key_number, // readSegment
					j, // index
					key_input, // key_input
					0,
					0, tid);
			}
		}
	}
	// Pass 2: reverse-complement sequence, forward keys
	for(i = 0; i < TEST_KEY_NUM; i++) {
		key_input[i].key_entry = NULL;
		key_input[i].key_locs = NULL;
		key_input[i].key_number = 0;
		key_input[i].key_entry_size = 0;
		key_hash[i] = 0;
	}
	for(i = 0; i < _msf_seqListSize; i++) {
		//printf("%d\n", i);
		k = _msf_sort_seqList[i].readNumber;
		for (m = 0; m < TEST_KEY_NUM; m++) {
			key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10 + 5); // forward 5-10-10-10
			locs = getCandidates(key_hash[m]);
			key_input[m].key_number = m;
			key_input[m].key_entry = locs;
			key_input[m].key_locs = locs + 1;
			if (locs != NULL)
				key_input[m].key_entry_size = locs[0];
			else
				key_input[m].key_entry_size = -1;
		}
		for (j = 0; j < TEST_KEY_NUM - 1; j++) {
			if (key_input[j].key_entry_size > 0) {
				mapPairEndSeqCG_forward(
					key_input[j].key_locs, // l1
					key_input[j].key_entry_size,// s1
					k, // readNumber
					key_input[j].key_number, // readSegment
					j, // index
					key_input, // key_input
					1,
					0, tid);
			}
		}
	}
	// Pass 3: reverse-complement sequence, reverse keys
	for(i = 0; i < TEST_KEY_NUM; i++) {
		key_input[i].key_entry = NULL;
		key_input[i].key_locs = NULL;
		key_input[i].key_number = 0;
		key_input[i].key_entry_size = 0;
		key_hash[i] = 0;
	}
	for(i = 0; i < _msf_seqListSize; i++) {
		k = _msf_sort_seqList[i].readNumber;
		for (m = 0; m < TEST_KEY_NUM; m++) {
			key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10); // reverse 10-10-10-5
			locs = getCandidates(key_hash[m]);
			key_input[m].key_number = m;
			key_input[m].key_entry = locs;
			key_input[m].key_locs = locs + 1;
			if (locs != NULL)
				key_input[m].key_entry_size = locs[0];
			else
				key_input[m].key_entry_size = -1;
		}
		for (j = 0; j < TEST_KEY_NUM - 1; j++) {
			if (key_input[j].key_entry_size > 0) {
				mapPairEndSeqCG_reverse(
					key_input[j].key_locs, // l1
					key_input[j].key_entry_size,// s1
					k, // readNumber
					key_input[j].key_number, // readSegment
					j, // index
					key_input, // key_input
					1,
					0, tid);
			}
		}
	}
	// Pass 4: forward sequence, reverse keys
	for(i = 0; i < TEST_KEY_NUM; i++) {
		key_input[i].key_entry = NULL;
		key_input[i].key_locs = NULL;
		key_input[i].key_number = 0;
		key_input[i].key_entry_size = 0;
		key_hash[i] = 0;
	}
	for(i = 0; i < _msf_seqListSize; i++) {
		k = _msf_sort_seqList[i].readNumber;
		for (m = 0; m < TEST_KEY_NUM; m++) {
			key_hash[m] = hashVal(_msf_seqList[k].seq + m*10); // reverse 10-10-10-5
			locs = getCandidates(key_hash[m]);
			key_input[m].key_number = m;
			key_input[m].key_entry = locs;
			key_input[m].key_locs = locs + 1;
			if (locs != NULL)
				key_input[m].key_entry_size = locs[0];
			else
				key_input[m].key_entry_size = -1;
		}
		for (j = 0; j < TEST_KEY_NUM - 1; j++) {
			if (key_input[j].key_entry_size > 0) {
				mapPairEndSeqCG_reverse(
					key_input[j].key_locs, // l1
					key_input[j].key_entry_size,// s1
					k, // readNumber
					key_input[j].key_number, // readSegment
					j, // index
					key_input, // key_input
					0,
					0, tid);
			}
		}
	}
	freeMem(key_input, TEST_KEY_NUM * sizeof(key_struct), "key_input @mapPairedEndSeqCG");
	/* Flush: write each read's mapping list (loc/err/cigar/md records) to
	   the mate-1 or mate-2 temp file, tracking the largest list per mate. */
	char fname1[FILE_NAME_LENGTH];
	char fname2[FILE_NAME_LENGTH];
	MappingLocations *cur;
	int tmpOut;
	int lmax = 0, rmax = 0;
	sprintf(fname1, "%s__%s__%s__%d__1.tmp", mappingOutputPath, _msf_refGenName,
			mappingOutput, _msf_openFiles);
	sprintf(fname2, "%s__%s__%s__%d__2.tmp", mappingOutputPath, _msf_refGenName,
			mappingOutput, _msf_openFiles);
	FILE* out;
	FILE* out1 = fileOpen(fname1, "w");
	FILE* out2 = fileOpen(fname2, "w");
	_msf_openFiles++;
	for (i = 0; i < _msf_seqListSize; i++) {
		/* even read numbers are mate 1, odd are mate 2 */
		if (i % 2 == 0) {
			out = out1;
			if (lmax < _msf_mappingInfo[i].size) {
				lmax = _msf_mappingInfo[i].size;
			}
		} else {
			out = out2;
			if (rmax < _msf_mappingInfo[i].size) {
				rmax = _msf_mappingInfo[i].size;
			}
		}
		tmpOut = fwrite(&(_msf_mappingInfo[i].size), sizeof(int), 1, out);
		if (_msf_mappingInfo[i].size > 0) {
			/* walk the MAP_CHUNKS-sized linked chunks of the mapping list */
			cur = _msf_mappingInfo[i].next;
			for (j = 0; j < _msf_mappingInfo[i].size; j++) {
				if (j > 0 && j % MAP_CHUNKS == 0) {
					cur = cur->next;
				}
				if(debugMode && (cur->cigarSize[j % MAP_CHUNKS] > SEQ_LENGTH || cur->mdSize[j % MAP_CHUNKS] > SEQ_LENGTH))
				{
					printf("ERROR in %d read size exceeds cigar=%d md =%d cigar=%s md =%s\n", i, cur->cigarSize[j % MAP_CHUNKS], cur->mdSize[j % MAP_CHUNKS], cur->cigar[j % MAP_CHUNKS], cur->md[j % MAP_CHUNKS]);
				}
				tmpOut = fwrite(&(cur->loc[j % MAP_CHUNKS]), sizeof(int), 1,
						out);
				tmpOut = fwrite(&(cur->err[j % MAP_CHUNKS]), sizeof(int), 1,
						out);
				tmpOut = fwrite(&(cur->cigarSize[j % MAP_CHUNKS]), sizeof(int),
						1, out);
				tmpOut = fwrite((cur->cigar[j % MAP_CHUNKS]), sizeof(char),
						(cur->cigarSize[j % MAP_CHUNKS]), out);
				tmpOut = fwrite(&(cur->mdSize[j % MAP_CHUNKS]), sizeof(int), 1,
						out);
				tmpOut = fwrite((cur->md[j % MAP_CHUNKS]), sizeof(char),
						(cur->mdSize[j % MAP_CHUNKS]), out);
			}
			//TODO: if freeAllMapping exist the next line should be comment
			//_msf_mappingInfo[i].size = 0;
		}
	}
	freeAllMapping();
	_msf_maxLSize += lmax;
	_msf_maxRSize += rmax;
	/* NOTE(review): tmpOut is uninitialized if _msf_seqListSize == 0;
	   the increment only suppresses unused-result warnings */
	tmpOut++;
	fclose(out1);
	fclose(out2);
}
/*
 * Search entry_coor[1..entry_size] (ascending coordinates; entry_coor[0]
 * holds the element count) for a coordinate equal to target_coor, or
 * failing that, the nearest coordinate in [target_coor, target_coor+range].
 * Returns the matching coordinate VALUE (not its index), or -1.
 *
 * Fix: the post-loop probe of entry_coor[mid+1] could read one element past
 * the end of the array when mid == entry_size; it is now bounds-checked.
 *
 * NOTE(review): the unusual probing scheme (extra equality checks on
 * lower/upper/mid each iteration, and upper_bound possibly reaching index 0,
 * the count slot) is kept as-is to preserve matching behavior.
 */
int searchKeyCG(int target_coor, unsigned int* entry_coor, int entry_size, int range) {
	if (entry_size <= 0)
		return -1;
	int lower_bound = 1;
	int upper_bound = entry_size;
	int mid = lower_bound + entry_size / 2;
	while (lower_bound < upper_bound) {
		if (entry_coor[mid] == target_coor)
			return entry_coor[mid];
		else if (entry_coor[mid] < target_coor)
			lower_bound = mid + 1;
		else
			upper_bound = mid - 1;
		mid = lower_bound + (upper_bound - lower_bound) / 2;
		if (entry_coor[upper_bound] == target_coor) {
			return entry_coor[upper_bound];
		}
		if (entry_coor[lower_bound] == target_coor) {
			return entry_coor[lower_bound];
		}
		if (entry_coor[mid] == target_coor) {
			return entry_coor[mid];
		}
	}
	/* no exact hit: accept the closest coordinate within `range` above target */
	if (entry_coor[mid] <= (target_coor + range) && entry_coor[mid] >= target_coor) {
		return entry_coor[mid];
	}
	else if (mid + 1 <= entry_size	/* FIX: was an out-of-bounds read */
			&& entry_coor[mid+1] <= (target_coor + range)
			&& entry_coor[mid+1] >= target_coor) {
		return entry_coor[mid+1];
	}
	else
		return -1;
}
/*
 * Hamming-distance check of one KEY_LENGTH-long read segment against the
 * reference starting at refIndex + variable. Returns the mismatch count.
 * tmp_offset is only read for the DEBUG trace; `length` is unused here.
 */
int verifySingleEndCG1(int refIndex, char* seq1, int * tmp_offset, int variable, int length) {
	const char *refSeg = _msf_refGen + refIndex + variable;
	const char *readSeg = seq1;
	int mismatches = 0;
	int k = 0;
	for (k = 0; k < KEY_LENGTH; k++) {
		mismatches += (refSeg[k] != readSeg[k]);
	}
	if (DEBUG == 1) {
		fprintf(stdout, "##### Hamming Distance Verification: Segment 1 LOC:%d #####\n", refIndex);
		/* matches the original trace, which printed from (end - 10) */
		for (k = 0; k < KEY_LENGTH; k++)
			fprintf(stdout, "(%c:%c)", refSeg[KEY_LENGTH - 10 + k], readSeg[KEY_LENGTH - 10 + k]);
		fprintf(stdout, " Error :%d", mismatches);
		fprintf(stdout, " OFFSET:%d\n", tmp_offset[1]);
	}
	return mismatches;
}
/*
 * Sliding Hamming-distance check: for each shift in [0, variable), compare
 * `length` bases of seq1+readIndex against the reference at refIndex+shift.
 * Stores the best (first-minimal) shift in offset[0] and returns its
 * mismatch count (capped at an initial best of 10).
 */
int verifySingleEndCG(int refIndex, int readIndex, char * seq1, int * offset, int variable, int length) {
	int best = 10;
	int shift = 0;
	int pos = 0;
	for (shift = 0; shift < variable; shift++) {
		const char *refSeg = _msf_refGen + refIndex + shift;
		const char *readSeg = seq1 + readIndex;
		int mismatches = 0;
		if (DEBUG == 1) {
			fprintf(stdout, "## Hamming: General LOC: %s SEQ: %s ##### ", refSeg, readSeg);
		}
		for (pos = 0; pos < length; pos++) {
			mismatches += (refSeg[pos] != readSeg[pos]);
		}
		/* strict '<' keeps the FIRST minimal shift, like the original */
		if (mismatches < best) {
			best = mismatches;
			offset[0] = shift;
		}
		if (DEBUG == 1) {
			for (pos = 0; pos < length; pos++)
				fprintf(stdout, "(%c:%c)", *(_msf_refGen + refIndex + shift + pos), *(seq1 + readIndex + pos));
			fprintf(stdout, " ERROR : %d", mismatches);
			fprintf(stdout, " OFFSET: %d\n", shift);
		}
	}
	if (DEBUG == 1) {
		fprintf(stdout, " ErrorMin: %d\n", best);
	}
	return best;
}
/*
 * Map all single-end CG reads against the current contig using OpenMP.
 * Four parallel passes mirror the paired-end path:
 *   1. seq  with forward keys (5-10-10-10)
 *   2. rseq with forward keys
 *   3. rseq with reverse keys (10-10-10-5)
 *   4. seq  with reverse keys
 * key_hash/key_input/locs/tid are private per thread; each thread writes
 * only _msf_verifiedLocs[tid] inside the verifiers.
 */
void mapAllSingleEndSeqCG() {
	int i = 0;
	int j = 0;
	int k = 0;
	int m = 0;
	int key_hash[TEST_KEY_NUM];
	unsigned int *locs = NULL;
	key_struct key_input[TEST_KEY_NUM];
	int tid = 0;
	omp_set_num_threads(number_of_threads);
#pragma omp parallel
	{
		// Pass 1: forward sequence, forward keys
#pragma omp for private(k,m,j,key_hash,key_input,locs,tid)
		for(i = 0; i < _msf_seqListSize; i++) {
			k = _msf_sort_seqList[i].readNumber;
			tid = omp_get_thread_num();
			for (m = 0; m < TEST_KEY_NUM; m++) {
				key_hash[m] = hashVal(_msf_seqList[k].seq + m*10 + 5); // forward 5-10-10-10
				locs = getCandidates(key_hash[m]);
				key_input[m].key_number = m;
				key_input[m].key_entry = locs;
				key_input[m].key_locs = locs + 1;
				/* locs[0] holds the candidate count; -1 marks "no entry" */
				if (locs != NULL) {
					key_input[m].key_entry_size = locs[0];
				}
				else {
					key_input[m].key_entry_size = -1;
				}
			}
			for (j = 0; j < TEST_KEY_NUM - 1; j++) {
				if (key_input[j].key_entry_size > 0) {
					mapSingleEndSeqCG_forward(
						key_input[j].key_locs, // l1
						key_input[j].key_entry_size,// s1
						k, // readNumber
						key_input[j].key_number, // readSegment
						j, // index
						key_input, // key_input
						0,
						0, tid);
				}
			}
		}
		// Pass 2: reverse-complement sequence, forward keys
#pragma omp for private(k,m,j,key_hash,key_input,locs,tid)
		for(i = 0; i < _msf_seqListSize; i++) {
			k = _msf_sort_seqList[i].readNumber;
			tid = omp_get_thread_num();
			for (m = 0; m < TEST_KEY_NUM; m++) {
				key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10 + 5); // forward 5-10-10-10
				locs = getCandidates(key_hash[m]);
				key_input[m].key_number = m;
				key_input[m].key_entry = locs;
				key_input[m].key_locs = locs + 1;
				if (locs != NULL)
					key_input[m].key_entry_size = locs[0];
				else
					key_input[m].key_entry_size = -1;
			}
			for (j = 0; j < TEST_KEY_NUM - 1; j++) {
				if (key_input[j].key_entry_size > 0) {
					mapSingleEndSeqCG_forward(
						key_input[j].key_locs, // l1
						key_input[j].key_entry_size,// s1
						k, // readNumber
						key_input[j].key_number, // readSegment
						j, // index
						key_input, // key_input
						1,
						0, tid);
				}
			}
		}
		// Pass 3: reverse-complement sequence, reverse keys
#pragma omp for private(k,m,j,key_hash,key_input,locs,tid)
		for(i = 0; i < _msf_seqListSize; i++) {
			k = _msf_sort_seqList[i].readNumber;
			tid = omp_get_thread_num();
			for (m = 0; m < TEST_KEY_NUM; m++) {
				key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10); // reverse 10-10-10-5
				locs = getCandidates(key_hash[m]);
				key_input[m].key_number = m;
				key_input[m].key_entry = locs;
				key_input[m].key_locs = locs + 1;
				if (locs != NULL)
					key_input[m].key_entry_size = locs[0];
				else
					key_input[m].key_entry_size = -1;
			}
			for (j = 0; j < TEST_KEY_NUM - 1; j++) {
				if (key_input[j].key_entry_size > 0) {
					mapSingleEndSeqCG_reverse(
						key_input[j].key_locs, // l1
						key_input[j].key_entry_size,// s1
						k, // readNumber
						key_input[j].key_number, // readSegment
						j, // index
						key_input, // key_input
						1,
						0, tid);
				}
			}
		}
		// Pass 4: forward sequence, reverse keys
#pragma omp for private(k,m,j,key_hash,key_input,locs,tid)
		for(i = 0; i < _msf_seqListSize; i++) {
			k = _msf_sort_seqList[i].readNumber;
			tid = omp_get_thread_num();
			for (m = 0; m < TEST_KEY_NUM; m++) {
				key_hash[m] = hashVal(_msf_seqList[k].seq + m*10); // reverse 10-10-10-5
				locs = getCandidates(key_hash[m]);
				key_input[m].key_number = m;
				key_input[m].key_entry = locs;
				key_input[m].key_locs = locs + 1;
				if (locs != NULL)
					key_input[m].key_entry_size = locs[0];
				else
					key_input[m].key_entry_size = -1;
			}
			for (j = 0; j < TEST_KEY_NUM - 1; j++) {
				if (key_input[j].key_entry_size > 0) {
					mapSingleEndSeqCG_reverse(
						key_input[j].key_locs, // l1
						key_input[j].key_entry_size,// s1
						k, // readNumber
						key_input[j].key_number, // readSegment
						j, // index
						key_input, // key_input
						0,
						0, tid);
				}
			}
		}
	}
	return ;
}
/*
 * Verify reverse-key candidate locations for one paired-end CG read and
 * append accepted mappings to _msf_mappingInfo[readNumber].
 *
 * l1/s1: candidate location list and its size; index/key_input: which anchor
 * key produced the candidates; direction: 0 = forward strand, 1 = reverse
 * (loc stored with negative sign); thread_id selects the per-thread
 * verified-location table. first_mate is unused here.
 */
void mapPairEndSeqCG_reverse(unsigned int *l1, int s1, int readNumber, int readSegment, int index,
		key_struct* key_input, int direction, int first_mate, int thread_id) {
	char matrix[200];
	char editString[200];
	char cigar[MAX_CIGAR_SIZE];
	int r = readNumber;
	int d = (direction==1?-1:1);	/* sign applied to the stored location */
	int readId = 2*readNumber + direction;
	char *_tmpSeq;
	char rqual[SEQ_LENGTH+1];
	rqual[SEQ_LENGTH]='\0';
	if (direction) {
		reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH);
		_tmpSeq = _msf_seqList[readNumber].rseq;
	}
	else {
		_tmpSeq = _msf_seqList[readNumber].seq;
	}
	int i = 0;
	int j = 0;
	int genLoc = 0;
	int *locs = (int *) l1;
	for (j = 0; j < s1; j++) {
		genLoc = locs[j];
		int af_pass[4];
		int af_offset[4];
		int err = -1;
		/* skip candidates whose full read span would leave the contig, or
		   whose locus was already verified for this readId */
		if (key_input[index].key_number == 0) {
			if (genLoc - 1 < _msf_refGenBeg
					|| genLoc - 1 + 35 + 8 > _msf_refGenEnd
					|| _msf_verifiedLocs[thread_id][genLoc] == readId ) {
				continue;
			}
		}
		else if (key_input[index].key_number == 1) {
			if (genLoc - 1 - 10 - 7 < _msf_refGenBeg
					|| genLoc - 1 + 25 - 1 > _msf_refGenEnd
					|| _msf_verifiedLocs[thread_id][genLoc - 10 - 6] == readId ) {
				continue;
			}
		}
		err = verifySingleEndSeqCG_backward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index);
		if (err <= errThreshold && err >= 0) {
			/* accepted: build matrix, MD string, and the CG-style CIGAR */
			generateAlignmentMatrxCG_backward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix);
			generateSNPSAM(matrix, strlen(matrix), editString);
			sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[2], af_offset[1], af_offset[0]);
		}
		else {
			err = -1;
		}
		//##### mrfast code #####
		if(err != -1 && !bestMode) {
			/* mark a small neighborhood as verified to avoid duplicates */
			int offset_range = 3;
			for(i = -offset_range ; i <= offset_range ; i++) {
				if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) {
					_msf_verifiedLocs[thread_id][genLoc + i] = readId;
				}
			}
			/* calkan counter */
			mappingCnt++;
			/* walk to the last MAP_CHUNKS-sized chunk of the mapping list */
			MappingLocations *parent = NULL;
			MappingLocations *child = _msf_mappingInfo[r].next;
			for (i = 0; i < (_msf_mappingInfo[r].size / MAP_CHUNKS); i++) {
				parent = child;
				child = child->next;
			}
			if (child == NULL) {
				/* current chunk is full (or list empty): append a new one */
				MappingLocations *tmp = getMem(sizeof(MappingLocations), "_msf_mappingInfo.next or tmp @mapPairEndSeqCG_reverse()");
				tmp->next = NULL;
				tmp->loc[0] = (genLoc+_msf_refGenOffset) * d;
				tmp->err[0] = err;
				tmp->cigarSize[0] = strlen(cigar);
				sprintf(tmp->cigar[0], "%s", cigar);
				tmp->mdSize[0] = strlen(editString);
				sprintf(tmp->md[0], "%s", editString);
				if (parent == NULL)
					_msf_mappingInfo[r].next = tmp;
				else
					parent->next = tmp;
			} else {
				if (strlen(cigar) > SEQ_LENGTH
						|| strlen(editString) > SEQ_LENGTH) {
					printf(
							"ERROR in %d read size(After mapping) exceeds cigar=%d md =%d cigar=%s md =%s\n",
							r, (int) strlen(cigar), (int) strlen(editString),
							cigar, editString);
				}
				child->loc[_msf_mappingInfo[r].size % MAP_CHUNKS] = (genLoc+_msf_refGenOffset) * d;
				child->err[_msf_mappingInfo[r].size % MAP_CHUNKS] = err;
				child->cigarSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(cigar);
				sprintf(child->cigar[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", cigar);
				child->mdSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(editString);
				sprintf(child->md[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", editString);
			}
			_msf_mappingInfo[r].size++;
		}
	}
}
/*
 * Release every MappingLocations chunk hanging off _msf_mappingInfo and
 * reset each per-read entry to an empty list. Chunks hold MAP_CHUNKS
 * mappings each, so a new chunk boundary is crossed whenever the mapping
 * index is a positive multiple of MAP_CHUNKS.
 */
void freeAllMapping()
{
	int r = 0;
	int m = 0;
	MappingLocations *done;
	MappingLocations *node;
	for (r = 0; r < _msf_seqListSize; r++) {
		if (_msf_mappingInfo[r].size > 0) {
			node = _msf_mappingInfo[r].next;
			for (m = 0; m < _msf_mappingInfo[r].size; m++) {
				if (m > 0 && m % MAP_CHUNKS == 0) {
					done = node;
					node = node->next;
					freeMem(done, sizeof(MappingLocations), "prev @freeAllMapping()");
				}
			}
			/* free the last (possibly partially filled) chunk */
			if (node != NULL)
				freeMem(node, sizeof(MappingLocations), "cur @freeAllMapping()");
		}
		_msf_mappingInfo[r].next = NULL;
		_msf_mappingInfo[r].size = 0;
	}
}
/*
 * Verify forward-key candidate locations for one paired-end CG read and
 * append accepted mappings to _msf_mappingInfo[readNumber].
 * Mirrors mapPairEndSeqCG_reverse but uses the forward verifier, forward
 * matrix builder, and the forward CIGAR offset order (af_offset[0..2]).
 *
 * l1/s1: candidate location list and its size; direction: 0 = forward
 * strand, 1 = reverse (loc stored negated); thread_id selects the
 * per-thread verified-location table. first_mate is unused here.
 */
void mapPairEndSeqCG_forward(unsigned int *l1, int s1, int readNumber, int readSegment, int index,
		key_struct* key_input, int direction, int first_mate, int thread_id) {
	char matrix[MAX_CIGAR_SIZE];
	char editString[MAX_CIGAR_SIZE];
	char cigar[MAX_CIGAR_SIZE];
	int r = readNumber;
	int d = (direction==1?-1:1);	/* sign applied to the stored location */
	int readId = 2*readNumber+direction;
	char *_tmpSeq;
	char rqual[SEQ_LENGTH+1];
	int i = 0;
	rqual[SEQ_LENGTH]='\0';
	if (direction) {
		reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH);
		_tmpSeq = _msf_seqList[readNumber].rseq;
	}
	else {
		_tmpSeq = _msf_seqList[readNumber].seq;
	}
	int j = 0;
	int genLoc = 0;
	int *locs = (int *) l1;
	for (j = 0; j < s1; j++) {
		genLoc = locs[j];
		int af_pass[4];
		int af_offset[4];
		int err = -1;
		/* skip candidates whose full read span would leave the contig, or
		   whose locus was already verified for this readId */
		if (key_input[index].key_number == 0) {
			if (genLoc - 1 - 5 + 1 < _msf_refGenBeg
					|| genLoc - 1 + 30 + 9 > _msf_refGenEnd
					|| _msf_verifiedLocs[thread_id][genLoc - 5] == readId ) {
				continue;
			}
		}
		else if (key_input[index].key_number == 1) {
			if (genLoc - 1 - 15 - 1 < _msf_refGenBeg
					|| genLoc - 1 + 20 + 7 > _msf_refGenEnd
					|| _msf_verifiedLocs[thread_id][genLoc - 15] == readId ) {
				continue;
			}
		}
		err = verifySingleEndSeqCG_forward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index);
		if (err <= errThreshold && err >= 0) {
			/* accepted: build matrix, MD string, and the CG-style CIGAR */
			generateAlignmentMatrxCG_forward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix);
			generateSNPSAM(matrix, strlen(matrix), editString);
			sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[0], af_offset[1], af_offset[2]);
		}
		else {
			err = -1;
		}
		//##### mrfast code #####
		if(err != -1 && !bestMode) {
			/* mark a small neighborhood as verified to avoid duplicates */
			int offset_range = 3;
			for(i = -offset_range ; i <= offset_range ; i++) {
				if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) {
					_msf_verifiedLocs[thread_id][genLoc + i] = readId;
				}
			}
			/* calkan counter */
			mappingCnt++;
			/* walk to the last MAP_CHUNKS-sized chunk of the mapping list */
			MappingLocations *parent = NULL;
			MappingLocations *child = _msf_mappingInfo[r].next;
			for (i = 0; i < (_msf_mappingInfo[r].size / MAP_CHUNKS); i++) {
				parent = child;
				child = child->next;
			}
			if (child == NULL) {
				/* current chunk is full (or list empty): append a new one */
				MappingLocations *tmp = getMem(sizeof(MappingLocations), "MappingLocations @mapPairEndSeqCG_forward()");
				tmp->next = NULL;
				tmp->loc[0] = (genLoc+_msf_refGenOffset) * d;
				tmp->err[0] = err;
				tmp->cigarSize[0] = strlen(cigar);
				sprintf(tmp->cigar[0], "%s", cigar);
				tmp->mdSize[0] = strlen(editString);
				sprintf(tmp->md[0], "%s", editString);
				if (parent == NULL)
					_msf_mappingInfo[r].next = tmp;
				else
					parent->next = tmp;
			} else {
				if (strlen(cigar) > SEQ_LENGTH || strlen(editString) > SEQ_LENGTH) {
					printf(
							"ERROR in %d read size(After mapping) exceeds cigar=%d md =%d cigar=%s md =%s\n",
							r, (int) strlen(cigar), (int) strlen(editString),
							cigar, editString);
				}
				child->loc[_msf_mappingInfo[r].size % MAP_CHUNKS] = (genLoc + _msf_refGenOffset) * d;
				child->err[_msf_mappingInfo[r].size % MAP_CHUNKS] = err;
				child->cigarSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(cigar);
				sprintf(child->cigar[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", cigar);
				child->mdSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(editString);
				sprintf(child->md[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", editString);
			}
			_msf_mappingInfo[r].size++;
		}
	}
}
/*
 * Build the 35-base alignment matrix for a backward (reverse-key) CG read:
 * segments of 10-10-10-5 bases, where af_offset[k] is the extra reference
 * shift applied from segment k+1 onward. Each output cell is 'M' on a match
 * or the reference base on a mismatch. af_pass and error are unused (kept
 * for interface compatibility with the forward variant).
 */
void generateAlignmentMatrxCG_backward(int genLoc, int * af_offset, char * seq, int * af_pass, int error, char * matrix) {
	const char *ref = _msf_refGen + genLoc;
	int shift = 0;
	int ix = 0;
	for (ix = 0; ix < 35; ix++) {
		/* accumulate the inter-segment reference shifts at 10/20/30 */
		if (ix == 10)
			shift += af_offset[0];
		else if (ix == 20)
			shift += af_offset[1];
		else if (ix == 30)
			shift += af_offset[2];
		matrix[ix] = (ref[ix + shift] == seq[ix]) ? 'M' : ref[ix + shift];
	}
	matrix[ix] = '\0';
}
/*
 * Build the 35-base alignment matrix for a forward CG read: segments of
 * 5-10-10-10 bases, where af_offset[k] is the extra reference shift applied
 * from segment k+1 onward. Each output cell is 'M' on a match or the
 * reference base on a mismatch. af_pass and error are unused (kept for
 * interface compatibility with the backward variant).
 */
void generateAlignmentMatrxCG_forward(int genLoc, int * af_offset, char * seq, int * af_pass, int error, char * matrix) {
	const char *ref = _msf_refGen + genLoc;
	int shift = 0;
	int ix = 0;
	for (ix = 0; ix < 35; ix++) {
		/* accumulate the inter-segment reference shifts at 5/15/25 */
		if (ix == 5)
			shift += af_offset[0];
		else if (ix == 15)
			shift += af_offset[1];
		else if (ix == 25)
			shift += af_offset[2];
		matrix[ix] = (ref[ix + shift] == seq[ix]) ? 'M' : ref[ix + shift];
	}
	matrix[ix] = '\0';
}
/*
 * Verify a forward CG read anchored by key `index` at *locs: locate the
 * remaining segments via searchKeyCG / sliding Hamming checks, fill
 * af_offset[0..2] with the inter-segment gaps, set af_pass flags for which
 * keys anchored, rewind *locs to the start of the 5-base head segment, and
 * return the total mismatch count (-1 when no acceptable placement exists).
 *
 * Gap constants (KEY_LENGTH, INITIAL_GAP01/12/23, EXPECTED_GAP, INDEL_GAP)
 * encode the CG 5-10-10-10 read geometry -- presumably KEY_LENGTH == 10 and
 * KEY_LENGTH0 == 5; TODO confirm against the header definitions.
 */
int verifySingleEndSeqCG_forward(int * locs, int * af_offset, char * seq1, int * af_pass, key_struct* key_input, int index) {
	int err = -1;
	int test = 0;
	int target = 0;
	int offset[1] = {0};
	int x = 0;
	int minErr = 0;
	int minErrLoc = 0;
	int tmp_offset[3] = {0, 0, 0};
	int genLoc = *locs;
	if (key_input[index].key_number == 0) {
		/* anchored on segment 1: try to co-anchor segment 2 directly */
		target = genLoc + KEY_LENGTH;
		test = searchKeyCG(target, key_input[1].key_entry, key_input[1].key_entry_size, EXPECTED_GAP);
		if (test != -1) {
			af_offset[1] = test - target;
			/* verify segment 3 by sliding, then the 5-base head */
			err = verifySingleEndCG(genLoc + KEY_LENGTH*2 - 1 + af_offset[1] + INITIAL_GAP23, 25, seq1, offset, INDEL_GAP, 10);
			if (err > errThreshold) {
				return -1;
			}
			af_pass[0] = 1;
			af_pass[1] = 1;
			af_pass[2] = 0;
			af_offset[2] = INITIAL_GAP23 + offset[0];
			err += verifySingleEndCG(genLoc - KEY_LENGTH0 - 1 - INITIAL_GAP01, 0, seq1, offset, INDEL_GAP, 5);
			af_offset[0] = -1 - offset[0];
			genLoc = genLoc - KEY_LENGTH0 - 1 - af_offset[0];
		}
		else {
			/* segment 2 not anchored: co-anchor segment 3 with a wider range
			   and recover the 1-2 split from the observed total gap (5..9) */
			target = genLoc + KEY_LENGTH*2 + INITIAL_GAP23;
			test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP*2);
			if (test != -1) {
				af_offset[2] = test - target + INITIAL_GAP23;
				if (af_offset[2] == 5) {
					err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP - 2, 10);
					af_offset[2] = 5 + offset[0];
					af_offset[1] = 5 - (5 + offset[0]);
				}
				else if (af_offset[2] == 6) {
					err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP - 1, 10);
					af_offset[2] = 5 + offset[0];
					af_offset[1] = 6 - (5 + offset[0]);
				}
				else if (af_offset[2] == 7) {
					err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP, 10);
					af_offset[2] = 5 + offset[0];
					af_offset[1] = 7 - (5 + offset[0]);
				}
				else if (af_offset[2] == 8) {
					err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12 + 1, 15, seq1, offset, INDEL_GAP - 1, 10);
					af_offset[2] = 6 + offset[0];
					af_offset[1] = 8 - (6 + offset[0]);
				}
				else if (af_offset[2] == 9) {
					err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12 + 2, 15, seq1, offset, INDEL_GAP - 2, 10);
					af_offset[2] = 7 + offset[0];
					af_offset[1] = 9 - (7 + offset[0]);
				}
				if (err > errThreshold) {
					return -1;
				}
				af_pass[0] = 1;
				af_pass[1] = 0;
				af_pass[2] = 1;
				err += verifySingleEndCG(genLoc - KEY_LENGTH0 - 1 - INITIAL_GAP01, 0, seq1, offset, INDEL_GAP, 5);
				af_offset[0] = INITIAL_GAP01 - offset[0];
				genLoc = genLoc - KEY_LENGTH0 - 1 - af_offset[0];
			}
		}
	}
	else if (key_input[index].key_number == 1) {
		/* anchored on segment 2: co-anchor segment 3, then sweep the 1-2 gap
		   over INDEL_GAP shifts and keep the placement with fewest errors */
		target = genLoc + KEY_LENGTH + INITIAL_GAP23;
		test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP);
		if (test != -1) {
			af_pass[0] = 0;
			af_pass[1] = 1;
			af_pass[2] = 1;
			af_offset[2] = test - target + INITIAL_GAP23;
			for (x = 0; x < INDEL_GAP; x++) {
				minErr = verifySingleEndCG1(genLoc - 13, seq1 + 5, offset, x, 10); // gap12 0 ~ 2 --> 2
				tmp_offset[1] = 2 - x;
				if (minErr <= errThreshold) {
					minErr += verifySingleEndCG(genLoc - 15 - tmp_offset[1], 0, seq1, offset, INDEL_GAP, 5);
					tmp_offset[0] = -1 - offset[0];
				}
				if (err > minErr || err < 0) {
					err = minErr;
					minErrLoc = genLoc - (KEY_LENGTH0 + KEY_LENGTH)- 1 - tmp_offset[0] - tmp_offset[1];
					af_offset[0] = tmp_offset[0];
					af_offset[1] = tmp_offset[1];
				}
			}
			genLoc = minErrLoc;
		}
	}
	*locs = genLoc;
	return err;
}
// Verify one candidate genome location (backward / reverse orientation) for a
// Complete-Genomics-style split read, anchoring the remaining keys and
// accumulating the mismatch count across the segments.
//   locs      in/out: candidate genome location; updated to the adjusted
//             location once the gap offsets are known.
//   af_offset out: per-gap offsets, later used by the caller to build the
//             CIGAR string.
//   seq1      read sequence being verified.
//   af_pass   out: 0/1 flags recording which of the three keys verified.
//   key_input / index: key table and which key produced this candidate.
// Returns the total error count, or -1 when no placement within
// errThreshold is found.
// NOTE(review): gap constants (KEY_LENGTH, INITIAL_GAP*, INDEL_GAP) encode
// the CG read layout — presumably 10bp keys with variable gaps; confirm
// against the key-generation code before changing any literal below.
int verifySingleEndSeqCG_backward(int * locs, int * af_offset, char * seq1, int * af_pass, key_struct* key_input, int index) {
int err = -1;
int test = 0;
int target = 0;
int x = 0;
int minErr = 0;
int tmp_offset[3] = {0, 0, 0};
int offset[1] = {0};
int genLoc = *locs;
if (key_input[index].key_number == 0) {
// Key 0 matched; try to anchor key 1 at its expected gap.
target = genLoc + KEY_LENGTH + INITIAL_GAP23;
test = searchKeyCG(target, key_input[1].key_entry, key_input[1].key_entry_size, EXPECTED_GAP);
if (test != -1) {
af_pass[0] = 1;
af_pass[1] = 1;
af_pass[2] = 0;
af_offset[0] = test - target + INITIAL_GAP23;
// Scan all indel widths for the second gap and keep the best total error.
for (x = 0; x <INDEL_GAP; x++) {
minErr = verifySingleEndCG1(genLoc + 20 + af_offset[0] - 1, seq1 + 20, offset, x, 10);
tmp_offset[1] = x;
if (minErr <= errThreshold) {
minErr += verifySingleEndCG(genLoc + 30 + af_offset[0] + tmp_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5);
tmp_offset[2] = offset[0] - 3;
}
// Keep the minimum error seen so far (err < 0 means "none yet").
if (err > minErr || err < 0) {
err = minErr;
af_offset[1] = tmp_offset[1];
af_offset[2] = tmp_offset[2];
}
}
}
else {
// Key 1 did not anchor; try key 2 across both gaps and decode the
// combined gap width (5..9) into the two per-gap offsets.
target = genLoc + KEY_LENGTH*2 + INITIAL_GAP12 + INITIAL_GAP23;
test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP*2);
if (test != -1) {
af_offset[0] = test - target + INITIAL_GAP23;
if (af_offset[0] == 5) {
err = verifySingleEndCG(genLoc + KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP - 2, 10);
af_offset[0] = 5 + offset[0];
af_offset[1] = 5 - (5 + offset[0]);
}
else if (af_offset[0] == 6) {
err = verifySingleEndCG(genLoc + KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP - 1, 10);
af_offset[0] = 5 + offset[0];
af_offset[1] = 6 - (5 + offset[0]);
}
else if (af_offset[0] == 7) {
err = verifySingleEndCG(genLoc + KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP, 10);
af_offset[0] = 5 + offset[0];
af_offset[1] = 7 - (5 + offset[0]);
}
else if (af_offset[0] == 8) {
err = verifySingleEndCG(genLoc + KEY_LENGTH + 6 - 1, 10, seq1, offset, INDEL_GAP - 1, 10);
af_offset[0] = 6 + offset[0];
af_offset[1] = 8 - (6 + offset[0]);
}
else if (af_offset[0] == 9) {
err = verifySingleEndCG(genLoc + KEY_LENGTH + 7 - 1, 10, seq1, offset, INDEL_GAP - 2, 10);
af_offset[0] = 7 + offset[0];
af_offset[1] = 9 - (7 + offset[0]);
}
if (err > errThreshold) {
return -1;
}
af_pass[0] = 1;
af_pass[1] = 0;
af_pass[2] = 1;
// Verify the trailing 5bp tail and record its offset.
err += verifySingleEndCG(genLoc+30+af_offset[0]+af_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5);
af_offset[2] = offset[0] - 3;
}
}
genLoc = genLoc - 1 ;
}
else if (key_input[index].key_number == 1) {
// Candidate came from the middle key: anchor key 2 forward, then verify
// the leading segment backward from genLoc.
target = genLoc + KEY_LENGTH;
test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP);
if (test != -1) {
err = verifySingleEndCG(genLoc - 10 - 1 - 7, 0, seq1, offset, INDEL_GAP, 10);
if (err > errThreshold) {
return -1;
}
af_pass[0] = 0;
af_pass[1] = 1;
af_pass[2] = 1;
af_offset[1] = test - target;
af_offset[0] = 7 - offset[0];
err += verifySingleEndCG(genLoc + 20 + af_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5);
af_offset[2] = offset[0] - 3;
// Shift the reported location back to the start of the read.
genLoc = genLoc - KEY_LENGTH - af_offset[0] - 1;
}
}
*locs = genLoc;
return err;
}
// sirfast: mapSingleEndSeqCG_forward
// first_mate 0 or 1, 0 the read is the first part and 1 is the second part.
// Map a single-end CG read on the forward orientation: for each of the s1
// candidate genome locations in l1, verify the placement with
// verifySingleEndSeqCG_forward() and emit a SAM record via output() for every
// hit within errThreshold.  Verified neighborhoods are cached per thread in
// _msf_verifiedLocs so the same read/location pair is not re-checked.
//   readNumber/readSegment/index: which read, segment, and key produced the
//   candidates; direction: 0 forward read, 1 reverse-complement;
//   first_mate: 0 the read is the first part and 1 is the second part.
void mapSingleEndSeqCG_forward(unsigned int *l1, int s1, int readNumber, int readSegment, int index,
key_struct* key_input, int direction, int first_mate, int thread_id) {
char matrix[MAX_CIGAR_SIZE];
char editString[MAX_CIGAR_SIZE];
char cigar[MAX_CIGAR_SIZE];
// Unique id for this (read, direction) pair, used as the cache marker.
int readId = 2*readNumber+direction;
char *_tmpSeq;
char *_tmpQual;
char rqual[SEQ_LENGTH+1];
SAM _msf_output;
OPT_FIELDS _msf_optionalFields[2];
rqual[SEQ_LENGTH]='\0';
int i = 0;
if (direction) {
// Reverse-complement mapping: reversed qualities, pre-computed rseq.
reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH);
_tmpQual = rqual;
_tmpSeq = _msf_seqList[readNumber].rseq;
}
else {
_tmpQual = _msf_seqList[readNumber].qual;
_tmpSeq = _msf_seqList[readNumber].seq;
}
int j = 0;
int genLoc = 0;
int *locs = (int *) l1;
for (j = 0; j < s1; j++) {
genLoc = locs[j];
int af_pass[4];
int af_offset[4];
int err = -1;
// Skip candidates whose verification window falls outside the loaded
// reference fragment, or that were already verified for this readId.
if (key_input[index].key_number == 0) {
if (genLoc - 1 - 5 + 1 < _msf_refGenBeg
|| genLoc - 1 + 30 + 9 > _msf_refGenEnd
|| _msf_verifiedLocs[thread_id][genLoc - 5] == readId ) {
continue;
}
}
else if (key_input[index].key_number == 1) {
if (genLoc - 1 - 15 - 1 < _msf_refGenBeg
|| genLoc - 1 + 20 + 7 > _msf_refGenEnd
|| _msf_verifiedLocs[thread_id][genLoc - 15] == readId ) {
continue;
}
}
err = verifySingleEndSeqCG_forward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index);
if (err <= errThreshold && err >= 0) {
// Build the alignment matrix, MD edit string, and CIGAR for the hit.
generateAlignmentMatrxCG_forward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix);
generateSNPSAM(matrix, strlen(matrix), editString);
sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[0], af_offset[1], af_offset[2]);
}
else {
err = -1;
}
if(err != -1 && !bestMode) {
mappingCnt++;
// Mark a +/-3 neighborhood as verified for this read.
int offset_range = 3;
for(i = -offset_range ; i <= offset_range ; i++) {
if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) {
_msf_verifiedLocs[thread_id][genLoc + i] = readId;
}
}
_msf_seqList[readNumber].hits[0]++;
// Fill and emit the SAM record; NM = error count, MD = edit string.
_msf_output.QNAME = _msf_seqList[readNumber].name;
_msf_output.FLAG = 16 * direction;
_msf_output.RNAME = _msf_refGenName;
_msf_output.POS = genLoc + _msf_refGenOffset;
_msf_output.MAPQ = 255;
_msf_output.CIGAR = cigar;
_msf_output.MRNAME = "*";
_msf_output.MPOS = 0;
_msf_output.ISIZE = 0;
_msf_output.SEQ = _tmpSeq;
_msf_output.QUAL = _tmpQual;
_msf_output.optSize = 2;
_msf_output.optFields = _msf_optionalFields;
_msf_optionalFields[0].tag = "NM";
_msf_optionalFields[0].type = 'i';
_msf_optionalFields[0].iVal = err;
_msf_optionalFields[1].tag = "MD";
_msf_optionalFields[1].type = 'Z';
_msf_optionalFields[1].sVal = editString;
output(_msf_output, thread_id);
if (_msf_seqList[readNumber].hits[0] == 1) {
mappedSeqCnt++;
}
if ( maxHits == 0 ) {
_msf_seqList[readNumber].hits[0] = 2;
}
if ( maxHits!=0 && _msf_seqList[readNumber].hits[0] == maxHits) {
completedSeqCnt++;
break;
}
}
}
}
// sirfast: mapSingleEndSeqCG_reverse
// first_mate 0 or 1, 0 the read is the first part and 1 is the second part.
// Map a single-end CG read on the reverse orientation: for each of the s1
// candidate genome locations in l1, verify the placement with
// verifySingleEndSeqCG_backward() and emit a SAM record via output() for
// every hit within errThreshold.  Verified neighborhoods are cached per
// thread in _msf_verifiedLocs so the same read/location pair is not
// re-checked.
//   readNumber/readSegment/index: which read, segment, and key produced the
//   candidates; direction: 0 forward read, 1 reverse-complement;
//   first_mate: 0 the read is the first part and 1 is the second part.
void mapSingleEndSeqCG_reverse(unsigned int *l1, int s1, int readNumber, int readSegment, int index,
                               key_struct* key_input, int direction, int first_mate, int thread_id) {
    // Size the alignment/edit buffers with MAX_CIGAR_SIZE — as the forward
    // twin mapSingleEndSeqCG_forward does — instead of a hard-coded 200.
    // All three buffers feed the same generateSNPSAM/CIGAR path, so they
    // must share one size bound.
    char matrix[MAX_CIGAR_SIZE];
    char editString[MAX_CIGAR_SIZE];
    char cigar[MAX_CIGAR_SIZE];
    // Unique id for this (read, direction) pair, used as the cache marker.
    int readId = 2 * readNumber + direction;
    char *_tmpSeq, *_tmpQual;
    char rqual[SEQ_LENGTH+1];
    SAM _msf_output;
    OPT_FIELDS _msf_optionalFields[2];
    rqual[SEQ_LENGTH]='\0';
    int i = 0;
    if (direction) {
        // Reverse-complement mapping: reversed qualities, pre-computed rseq.
        reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH);
        _tmpQual = rqual;
        _tmpSeq = _msf_seqList[readNumber].rseq;
    }
    else {
        _tmpQual = _msf_seqList[readNumber].qual;
        _tmpSeq = _msf_seqList[readNumber].seq;
    }
    int j = 0;
    int genLoc = 0;
    int *locs = (int *) l1;
    for (j = 0; j < s1; j++) {
        genLoc = locs[j];
        int af_pass[4];
        int af_offset[4];
        int err = -1;
        // Skip candidates whose verification window falls outside the loaded
        // reference fragment, or already verified for this readId.
        if (key_input[index].key_number == 0) {
            if (genLoc - 1 < _msf_refGenBeg
                    || genLoc - 1 + 35 + 8 > _msf_refGenEnd
                    || _msf_verifiedLocs[thread_id][genLoc] == readId ) {
                continue;
            }
        }
        else if (key_input[index].key_number == 1) {
            if (genLoc - 1 - 10 - 7 < _msf_refGenBeg
                    || genLoc - 1 + 25 - 1 > _msf_refGenEnd
                    || _msf_verifiedLocs[thread_id][genLoc - 10 - 6] == readId ) {
                continue;
            }
        }
        err = verifySingleEndSeqCG_backward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index);
        if (err <= errThreshold && err >= 0) {
            // Build the alignment matrix, MD edit string, and CIGAR.
            generateAlignmentMatrxCG_backward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix);
            generateSNPSAM(matrix, strlen(matrix), editString);
            sprintf(cigar, "10M%dN10M%dN10M%dS5M", af_offset[0], af_offset[1], -af_offset[2]);
        }
        else {
            err = -1;
        }
        if(err != -1 && !bestMode) {
            mappingCnt++;
            // Mark a +/-3 neighborhood as verified for this read.
            int offset_range = 3;
            for(i = -offset_range ; i <= offset_range ; i++) {
                if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) {
                    _msf_verifiedLocs[thread_id][genLoc + i] = readId;
                }
            }
            _msf_seqList[readNumber].hits[0]++;
            // Fill and emit the SAM record; NM = error count, MD = edit string.
            _msf_output.QNAME = _msf_seqList[readNumber].name;
            _msf_output.FLAG = 16 * direction;
            _msf_output.RNAME = _msf_refGenName;
            _msf_output.POS = genLoc + _msf_refGenOffset;
            _msf_output.MAPQ = 255;
            _msf_output.CIGAR = cigar;
            _msf_output.MRNAME = "*";
            _msf_output.MPOS = 0;
            _msf_output.ISIZE = 0;
            _msf_output.SEQ = _tmpSeq;
            _msf_output.QUAL = _tmpQual;
            _msf_output.optSize = 2;
            _msf_output.optFields = _msf_optionalFields;
            _msf_optionalFields[0].tag = "NM";
            _msf_optionalFields[0].type = 'i';
            _msf_optionalFields[0].iVal = err;
            _msf_optionalFields[1].tag = "MD";
            _msf_optionalFields[1].type = 'Z';
            _msf_optionalFields[1].sVal = editString;
            output(_msf_output, thread_id); // single
            if (_msf_seqList[readNumber].hits[0] == 1) {
                mappedSeqCnt++;
            }
            if ( maxHits == 0 ) {
                _msf_seqList[readNumber].hits[0] = 2;
            }
            if ( maxHits!=0 && _msf_seqList[readNumber].hits[0] == maxHits) {
                completedSeqCnt++;
                break;
            }
        }
    }
}
|
udr-1.c | /* { dg-do run } */
extern
#ifdef __cplusplus
"C"
#endif
void abort ();
struct S { int s; struct S *t; };
/* Combiner for the user-declared reductions on struct S: fold the partial
   sum held in *in into *out.  Only the s field participates; the t link is
   left untouched. */
void
foo (struct S *out, struct S *in)
{
  out->s = out->s + in->s;
}
/* Check-and-update helper for struct S: the value must be exactly 6 on
   entry; replace it with 15.  Any other value aborts the test. */
void
bar (struct S *x)
{
  if (x->s == 6)
    x->s = 15;
  else
    abort ();
}
/* Initializer used by the "+" declare-reduction on struct S: each private
   copy starts with s == 6 and t pointing at itself.  The original value
   (omp_orig, passed as y) is deliberately ignored. */
void
baz (struct S *x, struct S *y)
{
  (void) y;     /* unused: initialization does not depend on omp_orig */
  x->t = x;
  x->s = 6;
}
/* UDR "foo" on struct S: combine with foo(); each private copy starts as
   { 8, &omp_priv } (t points at the private copy itself). */
#pragma omp declare reduction (foo: struct S: foo (&omp_out, &omp_in)) \
initializer (omp_priv = { 8, &omp_priv })
/* UDR "foo" on the small integer types: accumulate omp_in - 4; private
   copies start at 4, so each thread contributes its net increment. */
#pragma omp declare reduction (foo: char, int, short: omp_out += omp_in - 4) \
initializer (omp_priv = 4)
/* Override "+" for struct S: combine with foo(); initialize via baz()
   (s = 6, t self-pointing; omp_orig is ignored by baz). */
#pragma omp declare reduction (+: struct S: foo (&omp_out, &omp_in)) \
initializer (baz (&omp_priv, &omp_orig))
/* Exercise the file-scope declare reductions inside a parallel region.
   q is summed with the built-in "+" int reduction and each thread sets it
   to 1, so after the region q equals the number of threads that ran; the
   final checks scale with that team size. */
void
test (struct S s, struct S t)
{
int q = 0;
/* "+" on struct S initializes each private s via baz (s.s == 6, s.t == &s);
   "foo" on struct S initializes each private t to { 8, &t }. */
#pragma omp parallel num_threads (4) reduction (+: s, q) reduction (foo: t)
{
if (s.s != 6 || s.t != &s || t.s != 8 || t.t != &t)
abort ();
s.s = 2;
t.s = 3;
q = 1;
}
/* foo() adds the private s fields: each thread contributed 2 to s.s and 3
   to t.s; the caller passed s.s == 12 and t.s == 14. */
if (s.s != 12 + 2 * q || t.s != 14 + 3 * q)
abort ();
}
/* Driver: checks that a block-scope declare reduction ("foo" on struct S,
   += combiner) shadows the enclosing one (*= combiner), and that the
   file-scope reductions still apply to v (int "foo"), t ("+" on struct S),
   and h (built-in min).  q again counts the threads that ran. */
int
main ()
{
struct S s, t;
s.s = 9; t.s = 10;
int h = 30, v = 2, q = 0;
#pragma omp declare reduction (foo: struct S: omp_out.s *= omp_in.s) \
initializer (omp_priv = omp_orig)
{
/* Inner declaration shadows the outer "*=" variant for this block. */
#pragma omp declare reduction (foo: struct S: omp_out.s += omp_in.s) \
initializer (omp_priv = omp_orig)
#pragma omp parallel num_threads (4) reduction (+: t, q) \
reduction (min: h) reduction (foo: s, v)
{
/* Private inits: s copies omp_orig (9); t via baz (6); v starts at 4
   (file-scope int "foo"); h at __INT_MAX__ (min identity). */
if (s.s != 9 || t.s != 6 || v != 4 || h != __INT_MAX__) abort ();
/* Keep the compiler from constant-folding the privatized values. */
asm volatile ("" : "+m" (s.s), "+m" (t.s));
asm volatile ("" : "+r" (h), "+r" (v));
h = t.s; s.s++; t.s++; v++; q++;
}
}
/* Per thread: h -> min(..., 6); s.s += 10; t.s += 7; v += (5 - 4). */
if (h != 6 || s.s != 9 + q * 10 || t.s != 10 + q * 7 || v != 2 + q)
abort ();
s.s = 12;
t.s = 14;
test (s, t);
return 0;
}
|
soma_clustering.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
//
// This model examplifies the use of extracellur diffusion and shows
// how to extend the default "Cell". In step 0 one can see how an extra
// data member is added and can be accessed throughout the simulation with
// its Get and Set methods. N cells are randomly positioned in space, of which
// half are of type 1 and half of type -1. Each type secretes a different
// substance. Cells move towards the gradient of their own substance, which
// results in clusters being formed of cells of the same type.
//
#ifndef DEMO_SOMA_CLUSTERING_H_
#define DEMO_SOMA_CLUSTERING_H_
#include <vector>
#include "biodynamo.h"
#include "my_cell.h"
#include "validation_criterion.h"
namespace bdm {
enum Substances { kSubstance0, kSubstance1 };
inline int Simulate(int argc, const char** argv) {
auto set_param = [](Param* param) {
// Create an artificial bound for the simulation space
param->bound_space = Param::BoundSpaceMode::kClosed;
param->min_bound = 0;
param->max_bound = 250;
param->unschedule_default_operations = {"mechanical forces"};
param->remove_output_dir_contents = true; // remove any old output files
};
Simulation simulation(argc, argv, set_param);
// Define initial model
auto* param = simulation.GetParam();
int num_cells = 20000;
#pragma omp parallel
simulation.GetRandom()->SetSeed(4357);
// Define the substances that cells may secrete
// Order: substance_name, diffusion_coefficient, decay_constant, resolution
ModelInitializer::DefineSubstance(kSubstance0, "Substance_0", 0.5, 0.1, 20);
ModelInitializer::DefineSubstance(kSubstance1, "Substance_1", 0.5, 0.1, 20);
int cell_type = 1;
std::string substance_name = "Substance_0";
auto construct = [&cell_type, &substance_name](const Double3& position) {
auto* cell = new MyCell(position, cell_type);
cell->SetDiameter(10);
cell->AddBehavior(new Secretion(substance_name));
cell->AddBehavior(new Chemotaxis(substance_name, 5));
return cell;
};
// Construct num_cells/2 cells of type 0
ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
num_cells / 2, construct);
// Construct num_cells/2 cells of type 1
cell_type = -1;
substance_name = "Substance_1";
ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
num_cells / 2, construct);
// Run simulation for N timesteps
simulation.GetScheduler()->Simulate(1000);
// Check if criterion is met
double spatial_range = 5;
auto crit = GetCriterion(spatial_range, num_cells / 8);
if (crit) {
std::cout << "Simulation completed successfully!\n";
}
return !crit;
}
} // namespace bdm
#endif // DEMO_SOMA_CLUSTERING_H_
|
GB_binop__minus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int32)
// A*D function (colscale): GB (_AxD__minus_int32)
// D*A function (rowscale): GB (_DxB__minus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int32)
// C=scalar+B GB (_bind1st__minus_int32)
// C=scalar+B' GB (_bind1st_tran__minus_int32)
// C=A+scalar GB (_bind2nd__minus_int32)
// C=A'+scalar GB (_bind2nd_tran__minus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with all three matrices dense, specialized for z = x - y on
// int32.  Expands the shared template (contents not visible here); no error
// return and no GB_DISABLE guard in this generated variant.
void GB (_Cdense_ewise3_accum__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, specialized for z = x - y on int32.
// NOTE(review): unlike the rdiv_fc32 sibling, this generated variant is void
// and has no GB_DISABLE fallback — presumably handled by the caller; confirm
// against the generator before relying on GxB_NO_* here.
void GB (_Cdense_ewise3_noaccum__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C (minus/int32 kernel).  Returns
// GrB_NO_VALUE when the operator is compiled out (GB_DISABLE) so the caller
// falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C (minus/int32 kernel).
// GrB_NO_VALUE when disabled; otherwise unpack the scalar and run the
// template.
GrB_Info GB (_Cdense_accumb__minus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the braced scope above always returns (kept as generated)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by diagonal matrix D (minus/int32 kernel).
// Writes directly into C->x; GrB_NO_VALUE when the operator is disabled.
GrB_Info GB (_AxD__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by diagonal matrix D (minus/int32 kernel).
// Writes directly into C->x; GrB_NO_VALUE when the operator is disabled.
GrB_Info GB (_DxB__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B for the minus/int32 operator.
// The alpha/beta scalars are only read in eWiseUnion mode (values used for
// entries present in just one of A or B).  Task slicing, masks, and the
// C_to_* maps are consumed by the included template.
GrB_Info GB (_AaddB__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
// only unpack the typed scalars when eWiseUnion semantics are requested
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hyper,
// specialized for minus/int32.  All work happens in the included meta
// template; GrB_NO_VALUE when the operator is compiled out.
GrB_Info GB (_AemultB_08__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, specialized for minus/int32.  GB_BINOP_FLIP is 0 for this
// operator (see the macro preamble), so only the non-flipped branch below
// is compiled.
GrB_Info GB (_AemultB_02__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and A, B
// bitmap/full, specialized for minus/int32.
GrB_Info GB (_AemultB_04__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap,
// specialized for minus/int32.
GrB_Info GB (_AemultB_bitmap__minus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for all bnz entries: bind a scalar as the first
// operand of minus/int32.  Bb is B's bitmap (may be NULL per the GBB
// macro convention); absent entries are skipped.
GrB_Info GB (_bind1st__minus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for all anz entries: bind a scalar as the second
// operand of minus/int32.  Ab is A's bitmap; absent entries are skipped.
GrB_Info GB (_bind2nd__minus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply z = x - aij (scalar bound first).
// Uses the GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__minus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply z = aij - y (scalar bound second).
// Uses the GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc32)
// A*D function (colscale): GB (_AxD__rdiv_fc32)
// D*A function (rowscale): GB (_DxB__rdiv_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc32)
// C=scalar+B GB (_bind1st__rdiv_fc32)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc32)
// C=A+scalar GB (_bind2nd__rdiv_fc32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (bij, aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC32 || GxB_NO_RDIV_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with all three matrices dense, specialized for the rdiv operator
// on single-precision complex (z = GB_FC32_div (y, x)).  No error return
// and no GB_DISABLE guard in this generated variant.
void GB (_Cdense_ewise3_accum__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (rdiv/fc32 kernel).  Returns
// GrB_NO_VALUE when the operator is compiled out so the caller falls back
// to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C (rdiv/fc32 kernel).
// GrB_NO_VALUE when the operator is disabled.
GrB_Info GB (_Cdense_accumB__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C (rdiv/fc32 kernel).
// GrB_NO_VALUE when disabled; otherwise unpack the scalar and run the
// template.
GrB_Info GB (_Cdense_accumb__rdiv_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the braced scope above always returns (kept as generated)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd C = A+B (optionally masked by M): entries present in either
// A or B appear in C.  Work is partitioned by the caller's TaskList.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult C = A.*B (optionally masked): entries present in both A and
// B appear in C.  General (method 01) case.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult C = A.*B where A is sparse/hypersparse and B is bitmap/full.
// GB_BINOP_FLIP (set by the generator) selects whether an explicit
// flipped apply of the operator is required.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full;
// work is sliced over the entries of the mask M.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Apply z = rdiv(x, bij) with the scalar x bound to the first argument:
// each output is GB_FC32_div (bij, x), i.e. complex bij / x.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap (Bb == NULL means all present)
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_div (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Apply z = rdiv(aij, y) with the scalar y bound to the second argument:
// each output is GB_FC32_div (y, aij), i.e. complex y / aij.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (Ab == NULL means all present)
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_div (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying the operator with the
// scalar x bound first (via the GB_CAST_OP macro defined just above).
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code that follows (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying the operator with the
// scalar y bound second (via the GB_CAST_OP macro defined just above).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__le_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32)
// A*D function (colscale): GB (_AxD__le_int32)
// D*A function (rowscale): GB (_DxB__le_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32)
// C=scalar+B GB (_bind1st__le_int32)
// C=scalar+B' GB (_bind1st_tran__le_int32)
// C=A+scalar GB (_bind2nd__le_int32)
// C=A'+scalar GB (_bind2nd_tran__le_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A (<=) B elementwise, all three matrices dense; C is boolean.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B is not generated for the LE comparator (the template below is
// compiled out with #if 0), so this stub reports success without work.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b is not generated for the LE comparator (the template below is
// compiled out with #if 0), so this stub reports success without work.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: compare each column j of A against the diagonal entry D(j,j);
// the boolean result (aij <= djj) is written to C.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: compare the diagonal entry D(i,i) against each row i of B;
// the boolean result (dii <= bij) is written to C.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd C = A (<=) B (optionally masked by M): entries present in
// either A or B appear in C.  Work is partitioned by the TaskList.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult C = A (<=) B (optionally masked): entries present in both
// A and B appear in C.  General (method 01) case.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult C = A.*B where A is sparse/hypersparse and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LE (the flip is handled by the caller choosing
// GE instead), so only the unflipped template is compiled in.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full;
// work is sliced over the entries of the mask M.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Apply z = (x <= bij) with the scalar x bound to the first argument.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap (Bb == NULL means all present)
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Apply z = (aij <= y) with the scalar y bound to the second argument.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (Ab == NULL means all present)
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying z = (x <= aij) with the
// scalar x bound first (via the GB_CAST_OP macro defined just above).
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code that follows (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying z = (aij <= y) with the
// scalar y bound second (via the GB_CAST_OP macro defined just above).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
linked_omp25.c | #include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#define N 5
#define FS 38
#define NMAX 10
struct node
{
int data;
int fibdata;
struct node *next;
};
int fib(int n)
{
    // Naive recursive Fibonacci.  The exponential cost is deliberate: it
    // provides the CPU-bound workload that the timing demo measures.
    if (n < 2)
        return n;
    return fib(n - 1) + fib(n - 2);
}
void processwork(struct node *p)
{
    // Compute fib(p->data) and cache the result in the node.
    p->fibdata = fib(p->data);
}
struct node *init_list(struct node *p)
{
    // Build a linked list of N+1 nodes: a head node with data = FS,
    // followed by nodes with data = FS+1 .. FS+N.  Returns the head.
    // The parameter is overwritten immediately; it is kept only for
    // interface compatibility with existing callers.
    struct node *head = malloc(sizeof *head);
    if (head == NULL)
    {
        // Fail fast: callers dereference the result without checking.
        fprintf(stderr, "init_list: out of memory\n");
        exit(EXIT_FAILURE);
    }
    head->data = FS;
    head->fibdata = 0;

    p = head;
    for (int i = 0; i < N; i++)
    {
        struct node *temp = malloc(sizeof *temp);
        if (temp == NULL)
        {
            fprintf(stderr, "init_list: out of memory\n");
            exit(EXIT_FAILURE);
        }
        p->next = temp;
        p = temp;
        p->data = FS + i + 1;
        p->fibdata = i + 1;
    }
    p->next = NULL;
    return head;
}
int main(int argc, char *argv[])
{
    // Demo: process a linked list of fibonacci workloads, first serially,
    // then in parallel via an array of node pointers and an OpenMP loop.
    double start, end;
    struct node *p = NULL;
    struct node *temp = NULL;
    struct node *head = NULL;
    struct node *parr[NMAX];
    int i, count = 0;

    printf("Process linked list\n");
    printf(" Each linked list node will be processed by function 'processwork()'\n");
    printf(" Each ll node will compute %d fibonacci numbers beginning with %d\n", N, FS);

    p = init_list(p);
    head = p;

    // Serial pass: walk the list and process each node for a baseline time.
    start = omp_get_wtime();
    {
        while (p != NULL)
        {
            processwork(p);
            p = p->next;
        }
    }
    end = omp_get_wtime();
    printf("serial Compute Time: %f seconds\n", end - start);

    p = head;
    start = omp_get_wtime();
    {
        // count number of items in the list. Strictly speaking this isn't
        // needed since we know there are N elements in the list. But in
        // most cases you don't know this and need to count nodes.
        while (p != NULL)
        {
            p = p->next;
            count++;
        }

        // Guard the fixed-size pointer array: writing past parr[NMAX-1]
        // would be undefined behavior if N is ever raised past NMAX-1.
        if (count > NMAX)
        {
            fprintf(stderr, "List has %d nodes but parr[] holds only %d\n",
                    count, NMAX);
            return 1;
        }

        // traverse the list and collect pointers into an array.
        p = head;
        for (i = 0; i < count; i++)
        {
            parr[i] = p;
            p = p->next;
        }

        // do the work in parallel; the loop index of the worksharing loop
        // is implicitly private per the OpenMP specification.
        #pragma omp parallel
        {
            #pragma omp single
            printf(" %d threads \n", omp_get_num_threads());
            #pragma omp for schedule(static, 1)
            for (i = 0; i < count; i++)
                processwork(parr[i]);
        }
    }
    end = omp_get_wtime();

    // Print results and free the list (p is NULL when the loop exits, so
    // no extra free is needed afterwards).
    p = head;
    while (p != NULL)
    {
        printf("%d : %d\n", p->data, p->fibdata);
        temp = p->next;
        free(p);
        p = temp;
    }

    printf("Compute Time: %f seconds\n", end - start);
    return 0;
}
|
decoder.c | /*! @file
* @brief
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "config.h"
#include "timing.h"
#if WARPSTUFF
#include "WarpLib.h"
#endif
//#include <stdlib.h>
#include <stddef.h>
#include <math.h>
#include <memory.h>
#include <time.h>
//#include <stdint.h>
#ifndef DEBUG
#define DEBUG (1 && _DEBUG)
#endif
#ifndef TIMING
#define TIMING (1 && _TIMING)
#endif
#ifndef XMMOPT
#define XMMOPT (1 && _XMMOPT)
#endif
#define GEN_LICENSE 0
#ifndef PI
#define PI 3.14159265359f
#endif
#ifdef _WINDOWS
#include <windows.h>
#elif __APPLE__
#include "macdefs.h"
#else
#ifndef ZeroMemory
#define ZeroMemory(p,s) memset(p,0,s)
#endif
#endif
#include <stdio.h>
#include <assert.h>
#include <emmintrin.h> // Intel aligned alloc and free
#include "dump.h"
#include "decoder.h"
#include "codec.h"
#include "vlc.h"
#include "codebooks.h" // References to the codebooks
#include "debug.h"
#include "color.h" // Color formats supported by image processing routines
#include "image.h"
#include "filter.h"
#include "spatial.h"
#include "temporal.h"
//#include "logo40x5.h"
#include "convert.h"
#include "wavelet.h"
#include "bitstream.h"
#include "frame.h"
#include "cpuid.h"
#include "bayer.h"
#include "metadata.h"
#include "DemoasicFrames.h" //TODO: Change filename to lower case
#include "swap.h"
#include "draw.h"
#include "RGB2YUV.h"
#include "lutpath.h"
#include "exception.h"
extern void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
extern void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
extern void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#ifdef SPI_LOADER
#include "spi.h"
#include "keyframes.h"
#endif
#ifndef DUMP
#define DUMP (0 && _DUMP)
#endif
#define ERROR_TOLERANT 1
#if defined(_WINDOWS) && DEBUG
#include <tchar.h> // For printing debug string in the console window
#endif
#define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform
#define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform
#if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch
#if _DECODE_TRANSFORM == 0
#define _DECODE_TRANSFORM 1
#endif
#endif
#ifndef _FSMBUFFER
#define _FSMBUFFER 0
#endif
// Turn off saturation in this file
#ifdef SATURATE
#undef SATURATE
#endif
#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x))
#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x))
//#define SATURATE8S(x) SATURATE_8S(x)
//#define SATURATE(x) (x)
// Enable or disable function inlining
#if 1 //DEBUG
#define inline
#else
#define inline __forceinline
#endif
// Pixel size used for computing the compression ratio
#define BITS_PER_PIXEL 8
// Default processor capabilities
#define DEFAULT_FEATURES (_CPU_FEATURE_MMX )
#define DEMOSAIC_DELAYLINES 4
// Forward references
void AllocDecoderGroup(DECODER *decoder);
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format);
void EraseDecoderFrames(DECODER *decoder);
TRANSFORM *AllocGroupTransform(GROUP *group, int channel);
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format);
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile);
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch);
#endif
bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision);
extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index,
uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading,
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes);
//extern void ComputeCube(DECODER *decoder);
extern bool NeedCube(DECODER *decoder);
extern void LoadTweak();
//extern int g_topdown;
//extern int g_bottomup;
// Performance measurements
#if _TIMING
extern TIMER tk_decompress; // Timers
extern TIMER tk_decoding;
extern TIMER tk_convert;
extern TIMER tk_inverse;
extern COUNTER decode_byte_count; // Counters
extern COUNTER sample_byte_count;
extern COUNTER alloc_group_count;
extern COUNTER alloc_transform_count;
extern COUNTER alloc_buffer_count;
extern COUNTER spatial_decoding_count;
extern COUNTER temporal_decoding_count;
extern COUNTER progressive_decode_count;
#endif
#if 0
// Table that maps from decoded format to pixel size
static const int PixelSize[] =
{
0, // DECODED_FORMAT_UNSUPPORTED
2, // DECODED_FORMAT_YUYV
2, // DECODED_FORMAT_UYVY
2, // DECODED_FORMAT_420
4, // DECODED_FORMAT_RGB32
3, // DECODED_FORMAT_RGB24
2, // DECODED_FORMAT_RGB555
2, // DECODED_FORMAT_RGB565
#if 0
2, // DECODED_FORMAT_YUYV_INVERTED
2, // DECODED_FORMAT_UYVY_INVERTED
2, // DECODED_FORMAT_420_INVERTED
#endif
4, // DECODED_FORMAT_RGB32_INVERTED
3, // DECODED_FORMAT_RGB24_INVERTED
2, // DECODED_FORMAT_RGB555_INVERTED
2, // DECODED_FORMAT_RGB565_INVERTED
3, // DECODED_FORMAT_V210,
4, // DECODED_FORMAT_YU64, // Custom 16 bits per channel (all data scaled up) YUYV format.
4, // DECODED_FORMAT_YR16 // Rows of YUV with 16 bits per channel
};
#if _DEBUG
char *decoded_format_string[] =
{
"Unsupported",
"YUYV",
"UYUV",
"420",
"RGB32",
"RGB24",
"RGB555",
"RGB565",
#if 0
"YUYV Inverted",
"UYVY Inverted",
"420 Inverted",
#endif
//#if BUILD_PROSPECT
"RGB32 Inverted",
"RGB24 Inverted",
"RGB555 Inverted",
"RGB565 Inverted",
"V210"
//#endif
};
#endif
#else
// Bytes per pixel for the low-numbered COLOR_FORMAT_* codes; zero marks
// planar or unsupported formats that have no single per-pixel size.
// NOTE(review): the V210 entry of 3 looks approximate (V210 is a packed
// 10-bit format) -- confirm against the V210 row-conversion code.
static const int pixel_size_table[] =
{
0, // COLOR_FORMAT_UNKNOWN
2, // COLOR_FORMAT_UYVY
2, // COLOR_FORMAT_YUYV
2, // COLOR_FORMAT_YVYU
0, // COLOR_FORMAT_YV12
0, // COLOR_FORMAT_I420
2, // COLOR_FORMAT_RGB16
3, // COLOR_FORMAT_RGB24
4, // COLOR_FORMAT_RGB32
0,
3, // COLOR_FORMAT_V210
0, // COLOR_FORMAT_RGB10
4, // COLOR_FORMAT_YU64
4, // COLOR_FORMAT_YR16
4, // COLOR_FORMAT_YUVA
};
// Number of entries in pixel_size_table (used for bounds checking).
static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);
// Return the size in bytes of one pixel for the given COLOR_FORMAT_* code,
// or 0 when the format is unknown or has no fixed per-pixel size.
static int PixelSize(int format)
{
int pixel_size = 0;
// Mask off the other fields in the format descriptor
// Use the lookup table to determine the pixel size (if possible)
if (0 <= format && format < pixel_size_table_length)
{
pixel_size = pixel_size_table[format];
//return pixel_size;
}
//TODO: Change the rest of this routine into one big switch statement
// Is this an Avid format?
else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
{
switch (format)
{
case COLOR_FORMAT_CbYCrY_8bit:
case COLOR_FORMAT_CbYCrY_10bit_2_8: // Only valid for the lower plane
pixel_size = 1;
break;
case COLOR_FORMAT_CbYCrY_16bit:
case COLOR_FORMAT_CbYCrY_16bit_2_14:
case COLOR_FORMAT_CbYCrY_16bit_10_6:
pixel_size = 2;
break;
default:
assert(0);
pixel_size = 2; // Assume 16 bits per pixel if the format is unknown
break;
}
}
// Is this a Bayer format?
else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
{
// NOTE(review): presumably the Bayer codes encode bytes/pixel as
// (code - 100), clamped to 2 -- confirm against the code assignments.
pixel_size = (format - 100);
if(pixel_size > 2)
pixel_size = 2;
}
else if (format == COLOR_FORMAT_RG48)
pixel_size = 6;
else if (format == COLOR_FORMAT_RG64)
pixel_size = 8;
else if (format == COLOR_FORMAT_B64A) {
pixel_size = 8;
}
return pixel_size;
}
#endif
// Return the size in bytes of one pixel for the specified decoded format.
// Asserts and returns zero for packed formats (V210, 10-bit 2+8) where a
// per-pixel byte count is not meaningful, and for unknown formats.
int DecodedPixelSize(DECODED_FORMAT format)
{
    switch (format)
    {
    case DECODED_FORMAT_YUYV:
    case DECODED_FORMAT_CT_UCHAR:
        return 2;

    case DECODED_FORMAT_RGB32:
        return 4;

    case DECODED_FORMAT_RG48:
        return 6;

    case DECODED_FORMAT_CT_SHORT:
    case DECODED_FORMAT_CT_SHORT_2_14:
    case DECODED_FORMAT_CT_USHORT_10_6:
    case DECODED_FORMAT_ROW16U:
        return 4;

    case DECODED_FORMAT_CT_10Bit_2_8:
    case DECODED_FORMAT_V210:
        // This routine should not be called to compute the pixel sizes for these formats
        assert(0);
        return 0;

    default:
        assert(0);
        return 0;
    }
}
#if 0
// Convert FOURCC code to a string
// Convert a FOURCC code into a 4-character string, emitting the bytes of
// the marker from the highest address down (plus a terminating NUL).
// The output buffer must hold at least 5 bytes.
static void str4cc(char *string, uint32_t marker)
{
    const char *bytes = (const char *)&marker;
    int i;

    for (i = 0; i < 4; i++)
    {
        string[i] = bytes[3 - i];
    }
    string[4] = '\0';
}
#endif
// Compute the display aspect ratio (*w : *h) for the decoded frame.
// The decoded dimensions are scaled back up to the full frame size, then
// the pixel aspect ratio (when available) is applied and the resulting
// fraction is reduced to lowest terms; otherwise the ratio is guessed
// from the frame dimensions.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
    int origw, origh, guess = 0;

    origw = decoder->frame.width;
    origh = decoder->frame.height;

    // Undo the decode-resolution scaling to recover the full frame size
    switch(decoder->frame.resolution)
    {
    case DECODED_RESOLUTION_FULL:
        break;
    case DECODED_RESOLUTION_HALF:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_LOWPASS_ONLY:
        origw *= 8;
        origh *= 8;
        break;
    case DECODED_RESOLUTION_FULL_DEBAYER:
        break;
    case DECODED_RESOLUTION_HALF_NODEBAYER:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        //origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
        origw *= 2;
        break;
    case DECODED_RESOLUTION_HALF_VERTICAL:
        origh *= 2;
        break;
    }

    // A missing or non-positive picture aspect ratio must be guessed
    if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
        guess = 1;

    // if guess default values, we can't trust them (16x9 is the default)
    if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
        guess = 1;

    if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
    {
        // Apply the pixel aspect ratio to the frame width
        int num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
        int den = origh;

        // Reduce the fraction with Euclid's algorithm.  The previous code
        // removed common factors by trial division with a loop bound of
        // num+den, which is needlessly slow for large frame dimensions.
        int a = (num < 0) ? -num : num;
        int b = (den < 0) ? -den : den;
        while (b != 0)
        {
            int t = a % b;
            a = b;
            b = t;
        }
        if (a > 1)  // a == 0 only when num == den == 0; leave the pair unchanged then
        {
            num /= a;
            den /= a;
        }

        decoder->codec.picture_aspect_x = num;
        decoder->codec.picture_aspect_y = den;
        guess = 0;
    }

    if(guess)
    {
        if(origw > 720) //HD.
        {
            if(origh == 1080)
            {
                if(origw == 2048)
                    *w=origw,*h=origh;  // 2K frame: assume square pixels
                else
                    *w=16,*h=9; // assume 16x9
            }
            else if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
        else
        {
            if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
    }
    else
    {
        *w=decoder->codec.picture_aspect_x;
        *h=decoder->codec.picture_aspect_y;
    }
}
// Return true if the resolution code is one the decoder can produce.
bool IsValidFrameResolution(int resolution)
{
    // Full, half, quarter, lowpass-only, and the half-horizontal modes
    // are supported; everything else is rejected.
    if (resolution == DECODED_RESOLUTION_FULL ||
        resolution == DECODED_RESOLUTION_HALF ||
        resolution == DECODED_RESOLUTION_QUARTER ||
        resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
        resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
        resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
    {
        return true;
    }

    return false;
}
// Return true if this decoder can decode to quarter resolution.
// Every decoder supports quarter resolution, so this always succeeds;
// the parameter is kept for interface compatibility.
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    (void)decoder;  // unused: all decoders support quarter resolution
    return true;
}
// Return the number of bytes needed for a DECODER instance so that callers
// can allocate one without knowing the (opaque) structure layout.
size_t DecoderSize()
{
    return sizeof(DECODER);
}
// Initialize a decoder instance: clears all decoder state (preserving the
// thread control parameters when they were explicitly set), installs the
// codebooks, and resets the codec state.  Must be called before any other
// decoder operation.  cs may be NULL to use the default codeset.
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
    }
#endif

    {
        //TODO: Clear the decoder before setting the CPU limit and affinity
        int i;
        //int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;

        // Save key params so they survive the memset below
        Thread_cntrl saved_params = decoder->thread_cntrl;

        // Clear everything
        memset(decoder, 0, sizeof(DECODER));

        // Restore key params
        if(saved_params.set_thread_params == 1) // used by the DShow Interface
        {
            decoder->thread_cntrl = saved_params;
        }

#if _TIMING
        InitTiming();
#endif

        // Set the file for status information during decoding
        decoder->logfile = logfile;

        // Initialize the decoding error to no error
        decoder->error = CODEC_ERROR_OKAY;

        // Most recent marker found during decoding
        decoder->marker = 0;

        // Count of frames decoded
        decoder->frame_count = 0;

        // Set the codebooks that will be used for decoding
        if (cs != NULL)
        {
            // Use the codeset provided in the call
            for(i=0; i<CODEC_NUM_CODESETS; i++)
            {
                // Codebook for decoding highpass coefficients
                decoder->magsbook[i] = cs[i].magsbook;

                // Codebook for decoding runs of coefficients
                decoder->runsbook[i] = cs[i].runsbook;

                // Lookup table for fast codebook search
                decoder->fastbook[i] = cs[i].fastbook;
            }
        }
        else
        {
            // Use the default codeset
            // NOTE(review): only codeset 0 is filled here while the branch
            // above fills all CODEC_NUM_CODESETS entries -- confirm callers
            // that pass NULL never use the higher codesets.
            decoder->magsbook[0] = cs9.magsbook;
            decoder->runsbook[0] = cs9.runsbook;
            decoder->fastbook[0] = cs9.fastbook;
        }

        // Initialize the codec state
        InitCodecState(&decoder->codec);

        // Start with no scratch buffer; one is attached by AllocDecoderBuffer
        InitScratchBuffer(&decoder->scratch, NULL, 0);

#if _DUMP
        // Initialize the descriptor for controlling debug output
        decoder->dump.enabled = false;
        decoder->dump.channel_mask = 0;
        decoder->dump.wavelet_mask = 0;
        memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
        memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
    }

    //REDTEST
    decoder->frm = 0;
    decoder->run = 1;

#if _ALLOCATOR
    decoder->allocator = NULL;
#endif

    decoder->initialized = 1; //DAN20060912
}
// Install the license key into the decoder, but only when no license has
// been set yet (an all-zero key means "unlicensed").  NULL arguments are
// ignored.
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
    if (decoder == NULL || licensekey == NULL)
        return;

    // An all-zero key indicates that no license has been installed
    const unsigned char unlicensed[16] = {0};

    // Has the license been set?
    if (memcmp(decoder->licensekey, unlicensed, sizeof(decoder->licensekey)) == 0)
    {
        // Copy the license into the decoder
        memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
    }
}
// Free data allocated within the decoder.
// Releases every buffer, transform, lookup table, worker thread pool, and
// (recursively) the parallel decoder owned by this decoder.  Does not free
// the DECODER structure itself.  Safe to call on an uninitialized decoder.
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // Free the transforms allocated in the decoder
    int i;

    if(decoder->initialized == 0)
        return; // nothing to free //DAN20060912

#if _GRAPHICS
    DrawClose(decoder);
#endif

    // Free the metadata databases
    for(i=0; i<=METADATA_PRIORITY_MAX; i++)
    {
        if(decoder->DataBases[i])
        {
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
    }

    if(decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }

    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif
        decoder->transform[i] = NULL;
    }

    if(decoder->aligned_sample_buffer)
    {
#if _ALLOCATOR
        FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
        decoder->aligned_sample_buffer = NULL;
        decoder->aligned_sample_buffer_size = 0;
    }

    if(decoder->tools)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->tools);
#else
        MEMORY_FREE(decoder->tools);
#endif
        decoder->tools = NULL;
    }

    // Free the buffer allocated for decoding
    if (decoder->buffer != NULL)
    {
#if DEBUG_BUFFER_USAGE
        int i;
        char *ptr = (char *)decoder->buffer;
        FILE *fp = fopen("C:/free.txt", "a");
        fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
        i = decoder->buffer_size-1;
        while(ptr[i] == 1) i--;
        fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
        fclose(fp);
#endif
#if _ALLOCATOR
        FreeAligned(allocator, decoder->buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
        decoder->buffer = NULL;
        decoder->buffer_size = 0;

        // Clear the fields in the scratch buffer descriptor
        memset(&decoder->scratch, 0, sizeof(SCRATCH));

        // Eventually the buffer and buffer size fields will be obsolete
    }

    for(i=0;i<_MAX_CPUS;i++)
    {
        if(decoder->threads_buffer[i])
        {
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
            MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
            decoder->threads_buffer[i] = NULL;
        }
    }
    decoder->threads_buffer_size = 0;

    // Do not attempt to free the codebooks since the
    // codebook pointers are references to static tables

    // Can free some of the data structures allocated by the decoder
    FreeCodebooks(decoder);

#if _INTERLACED_WORKER_THREADS
    if(decoder->interlaced_worker.lock_init) // threads started
    {
        int i;

        // Signal this thread to stop
        SetEvent(decoder->interlaced_worker.stop_event);

        // Free all handles used by the worker threads
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            WaitForSingleObject(decoder->interlaced_worker.handle[i], INFINITE); //JY20080307
            CloseHandle(decoder->interlaced_worker.handle[i]);
            CloseHandle(decoder->interlaced_worker.start_event[i]);
            CloseHandle(decoder->interlaced_worker.done_event[i]);
        }
        CloseHandle(decoder->interlaced_worker.row_semaphore);
        CloseHandle(decoder->interlaced_worker.stop_event);

        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.handle[i] = 0;
            decoder->interlaced_worker.start_event[i] = 0;
            decoder->interlaced_worker.done_event[i] = 0;
        }
        decoder->interlaced_worker.row_semaphore = 0;
        decoder->interlaced_worker.stop_event = 0;
    }

    // Free the critical section used by the worker threads
    DeleteCriticalSection(&decoder->interlaced_worker.lock);
    decoder->interlaced_worker.lock_init = 0;
#endif

#if _THREADED
    if(decoder->entropy_worker_new.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->entropy_worker_new.pool);
        DeleteLock(&decoder->entropy_worker_new.lock);
    }
    if(decoder->worker_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->worker_thread.pool);
        DeleteLock(&decoder->worker_thread.lock);
    }
    if(decoder->draw_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->draw_thread.pool);
        DeleteLock(&decoder->draw_thread.lock);
    }
/*
    if(decoder->qt_convert_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_convert_worker.pool);
        DeleteLock(&decoder->qt_convert_worker.lock);
    }
    if(decoder->qt_scale_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_scale_worker.pool);
        DeleteLock(&decoder->qt_scale_worker.lock);
    }
*/
    if(decoder->parallelDecoder)
    {
        if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
        {
            ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
            DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
            decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
        }

        // Recursively release the parallel decoder before freeing it
        ClearDecoder(decoder->parallelDecoder);

#if _ALLOCATOR
        Free(decoder->allocator, decoder->parallelDecoder);
#else
        MEMORY_FREE(decoder->parallelDecoder);
#endif
        decoder->parallelDecoder = NULL;
    }
#endif

    //MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
    if(decoder->RGBFilterBuffer16)
    {
        FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = 0;
        decoder->RGBFilterBufferSize = 0;
    }
    if(decoder->RawBayer16)
    {
        FreeAligned(decoder->allocator, decoder->RawBayer16);
        decoder->RawBayer16 = 0;
        decoder->RawBayerSize = 0;
    }
    if(decoder->StereoBuffer)
    {
        FreeAligned(decoder->allocator, decoder->StereoBuffer);
        decoder->StereoBuffer = 0;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        FreeAligned(decoder->allocator, decoder->RawCube);
        decoder->RawCube = 0;
    }
    if(decoder->Curve2Linear)
    {
        FreeAligned(decoder->allocator, decoder->Curve2Linear);
        decoder->Curve2Linear = 0;
    }
    if(decoder->Linear2CurveRed)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }

    //3d LUT
    {
        if(decoder->LUTcache)
            Free(decoder->allocator, decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }

#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);

        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;
        decoder->lastLensSphere = 0;
        decoder->lastLensFill = 0;
        decoder->lastLensStyleSel = 0;
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
    }
#endif

    if(decoder->overrideData)
    {
        Free(decoder->allocator, decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }

    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            Free(decoder->allocator, decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#else
    if(decoder->RGBFilterBuffer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = NULL;
    }
    if(decoder->RawBayer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RawBayer16);
        decoder->RawBayer16 = NULL;
    }
    if(decoder->StereoBuffer)
    {
        MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
        decoder->StereoBuffer = NULL;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        MEMORY_ALIGNED_FREE(decoder->RawCube);
        decoder->RawCube = NULL;
    }
    if(decoder->Curve2Linear)
    {
        MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
        decoder->Curve2Linear = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->Linear2CurveRed)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }

    //3d LUT
    {
        if(decoder->LUTcache)
            MEMORY_FREE(decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }

#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        // BUGFIX: was geomesh_destroy(mesh) -- "mesh" is not declared in this
        // scope so the non-allocator build did not compile; the allocator
        // branch above correctly destroys decoder->mesh.
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;

        // BUGFIX: the member names below were spelled lastLlens* in this
        // branch; renamed to match the lastLens* fields used by the
        // allocator branch above.
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;
        decoder->lastLensSphere = 0;
        decoder->lastLensFill = 0;
        decoder->lastLensStyleSel = 0;
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
    }
#endif

    if(decoder->overrideData)
    {
        MEMORY_FREE(decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }

    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            MEMORY_FREE(decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#endif

#ifdef SPI_LOADER
    SPIReleaseAll(decoder);
    //KeyframesReleaseAll(decoder);
#endif

    decoder->initialized = 0; // cleared
}
// Shut down the decoder by releasing everything it owns.
// The logfile is deliberately left open; the caller decides when to close it.
void ExitDecoder(DECODER *decoder)
{
    // Let the caller keep the logfile open or choose to close it
    //if (logfile) fclose(logfile);

    // Free data allocated within the decoder
    ClearDecoder(decoder);
}
// Allocate the data structures for decoding a group.
// Ensures every channel slot has a zeroed TRANSFORM; on allocation failure
// the decoder error is set to CODEC_ERROR_TRANSFORM_MEMORY and the routine
// returns early.
void AllocDecoderGroup(DECODER *decoder)
{
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    int channel;

    assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004

    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++) //DAN07022004
    {
        TRANSFORM *transform;

        // Skip channels that already have a transform data structure
        if (decoder->transform[channel] != NULL)
            continue;

#if _ALLOCATOR
        transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
        transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
        assert(transform != NULL);
        if (transform == NULL)
        {
            decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
            return;
        }

        memset(transform, 0, sizeof(TRANSFORM));
        decoder->transform[channel] = transform;

#if _TIMING
        alloc_transform_count++;
#endif
    }
}
// Allocate the buffer used for intermediate results during decoding.
// The size is derived from the frame width, the output format, and the CPU
// count; an existing buffer is reused when it is already large enough.
// Also (re)allocates the per-thread scratch buffers used by the debayer and
// color formatting threads.  Returns false if any allocation fails.
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    int cpus;
    size_t size;
    size_t row_size;
    char *buffer;

#if 0
    // Allocate a buffer large enough for six rows of cache lines
    size = width * sizeof(PIXEL);
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
    // Allocate a buffer large enough for nine rows of cache lines
    size = width * sizeof(PIXEL) * 4;
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif

    // Add format-specific space for the output conversion rows
    switch (format)
    {
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
        // Increase the buffer size for decoding to the V210 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_CbYCrY_10bit_2_8:
    case DECODED_FORMAT_CbYCrY_16bit_2_14:
    case DECODED_FORMAT_CbYCrY_16bit_10_6:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 8 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_WP13:
        // Increase the buffer size for decoding to the RG48/WP13 formats
        row_size = 6 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 12 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG64:
        // Increase the buffer size for decoding to the RG64 format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR3:
        // Increase the buffer size for decoding to the BYR3 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR4:
        // Increase the buffer size for decoding to the BYR4 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_W13A:
        // Increase the buffer size for decoding to the B64A format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    default:
        // Increase the buffer size for YUV to RGB conversion
        row_size = 3 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 2 * 2 * row_size;
        break;
    }

    // The CPU count is packed into the upper 16 bits of the capabilities
    cpus = decoder->thread_cntrl.capabilities >> 16;
    if(cpus > 4)
        size *= 4;
    if(cpus > 16) //DAN20120803 -- 4444 clips
        size *= 2;

    // Has a buffer already been allocated?
    if (decoder->buffer != NULL)
    {
        // Is the buffer large enough?
        if (decoder->buffer_size < size)
        {
            // Free the previous buffer
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
            decoder->buffer = NULL;
            decoder->buffer_size = 0;
        }
        else
        {
            // The existing buffer can be reused as-is
            return true;
        }
    }

    buffer = decoder->buffer;
    if(buffer == NULL)
    {
        // Allocate the decoding buffer
#if _ALLOCATOR
        buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
        buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
        if(buffer == NULL)
        {
            return false;
        }
    }

#if DEBUG_BUFFER_USAGE
    // Fill with a known pattern so the high-water mark can be measured
    memset(buffer, 1, size);
#endif

    // Save the buffer and its size in the decoder
    decoder->buffer = buffer;
    decoder->buffer_size = size;

    // Initialize the scratch space descriptor
    InitScratchBuffer(&decoder->scratch, buffer, size);

    // allocate buffer for each debayer/color formating thread
    {
        int i;

        size = (width+16)*3*2*4*2*4; // sixteen lines
        if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
            size *= 1 + ((height+(width/2))/width);

        // Free the per-thread buffers if they are too small for the new size
        if (decoder->threads_buffer_size < size)
        {
            for(i=0;i<_MAX_CPUS;i++)
            {
                if(decoder->threads_buffer[i])
                {
#if _ALLOCATOR
                    FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
                    MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
                    decoder->threads_buffer[i] = NULL;
                }
            }
            decoder->threads_buffer_size = 0;
        }

        // Allocate one scratch buffer per CPU
        for(i=0;i<cpus;i++)
        {
            if(decoder->threads_buffer[i] == NULL)
            {
#if _ALLOCATOR
                decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
                decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
                if(decoder->threads_buffer[i] == NULL)
                {
                    return false;
                }
            }
        }
        decoder->threads_buffer_size = size;
    }

    // Eventually the scratch space descriptor will replace the buffer and buffer_size fields
    return true;
}
// Resize the intermediate decoding buffer for new frame dimensions.
// Delegates to AllocDecoderBuffer, which reuses the existing buffer when it
// is already large enough.  Returns false if reallocation fails.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    // Check that the dimensions are valid
    assert(width > 0);
    assert(height > 0);

    // Just call the allocation routine
    return AllocDecoderBuffer(decoder, width, height, format);
}
// Reset the per-band bookkeeping flags in every allocated wavelet so the
// next sample can be decoded from a clean state.
void ClearTransformFlags(DECODER *decoder)
{
    int channel;

    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
    {
        TRANSFORM *transform = decoder->transform[channel];
        int k;

        // Channels are allocated contiguously: stop at the first empty slot
        if (transform == NULL)
            break;

        for (k = 0; k < TRANSFORM_MAX_WAVELETS; k++)
        {
            IMAGE *wavelet = transform->wavelet[k];
            if (wavelet == NULL)
                continue;

            wavelet->band_valid_flags = 0;
            wavelet->band_started_flags = 0;
        }
    }
}
// Initialize the tables for decoding the wavelet transforms.
// Copies the subband-to-wavelet and subband-to-band index tables into the
// decoder, zero-filling any trailing entries beyond num_subbands.
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
    size_t table_size = num_subbands * sizeof(int);

    // Clear each full table, then copy in the entries that are actually used
    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, table_size);

    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_band_index, subband_band_index, table_size);
}
#if 0
// Return true if the color format is one the decoder can produce.
// The Bayer, RGB48/RGBA64, and B64A formats are accepted explicitly;
// everything else must fall inside the standard decoded format range.
static bool IsValidFormat(int format)
{
    if (format == COLOR_FORMAT_BYR5 ||
        format == COLOR_FORMAT_BYR4 ||
        format == COLOR_FORMAT_BYR3 ||
        format == COLOR_FORMAT_BYR2 ||
        format == COLOR_FORMAT_RG48 ||
        format == COLOR_FORMAT_RG64 ||
        format == COLOR_FORMAT_B64A)
    {
        return true;
    }

    // Otherwise the format must lie within the standard range
    return (COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT);
}
#endif
#if _INTERLACED_WORKER_THREADS
// Create the events, semaphore, lock, and threads used by the interlaced
// worker pool.  Idempotent: creation is skipped when lock_init indicates
// the pool has already been started.  The threads are created last so that
// every handle they use already exists.
// NOTE(review): the CreateEvent/CreateSemaphore results are not checked --
// confirm that failure here is acceptable (only the thread handles are
// asserted).
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;

    if(decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for starting the worker threads (auto-reset, initially unsignaled)
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create a semaphore to signal the worker threads to process rows
        decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);

        // Create an event for each worker thread to signal that it has finished
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create an event for forcing the worker threads to terminate (manual-reset)
        decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);

        // Zero the count of worker threads that are active
        decoder->interlaced_worker.thread_count = 0;

        // Initialize the lock for controlling access to the worker thread data
        InitializeCriticalSection(&decoder->interlaced_worker.lock);
        decoder->interlaced_worker.lock_init = 1;

        // Start the worker threads after all of the synchronization objects exist
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.id[i] = 0;
            decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
            assert(decoder->interlaced_worker.handle[i] != NULL);
        }
    }
}
#endif
#if 0
// Disabled test hook: performs an integer division so that calling it with
// x == 0 deliberately raises a divide-by-zero exception (used to exercise
// the exception handler).  The volatile qualifiers keep the compiler from
// folding the division away.
int TestException(int x)
{
    static volatile int dividend = 100;
    volatile int divisor = x;
    return dividend / divisor;
}
#endif
// Process device driver request to initialize the decoder.
// Clears the decoder, installs the codebooks and finite state machines,
// sizes the frame for the requested resolution, starts the worker thread
// pools (unless thread start is deferred), and allocates the transforms
// and scratch buffer.  Returns false on any allocation or codebook error.
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
    CODESET codesets[CODEC_NUM_CODESETS];
    int i;
    int cpus;
    //int x = 0;

    // Copy the static codesets so the decoder works from its own tables
#if CODEC_NUM_CODESETS == 3
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
    memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

#ifdef _WINDOWS
    // Set the handler for system exceptions
    SetDefaultExceptionHandler();
#endif

    //TestException(x);

    // Clear all decoder fields except the logfile and set the codebooks for decoding
    InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
    decoder->allocator = allocator;
#endif

    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The CPU count is packed into the upper 16 bits of the capabilities
    cpus = decoder->thread_cntrl.capabilities >> 16;
    assert(cpus > 0 && cpus <= _MAX_CPUS);

    // Decode to half resolution?
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        // Reduce the frame size by half in each dimension
        width = width/2;
        height = height/2;
    }
    else if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Reduce the frame size by one fourth in each dimension
        width = width/4;
        height = height/4;
    }

    // Initialize the codebooks
#if _ALLOCATOR
    if (!InitCodebooks(decoder->allocator, codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#else
    if (!InitCodebooks(codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#endif

    // Initialize the FSM
    InitDecoderFSM(decoder, &codesets[0]);

    // Check the frame dimensions and format
    //assert(width > 0);
    //assert(height > 0);
//  assert(IsValidFormat(format));

#if _THREADED_DECODER
    // Create a semaphore to signal the transform thread to begin processing

    // Initialize the transform queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
    memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
    StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if !_DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if(cpus > 1)
    {
        // The entropy pool is capped at four threads
        int threads = cpus;
        if(threads > 4)
            threads = 4;

        CreateLock(&decoder->entropy_worker_new.lock);

        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                        threads,
                        EntropyWorkerThreadProc,
                        decoder);
    }

    // Initialize the lock that controls access to the generic worker thread data
    CreateLock(&decoder->worker_thread.lock);

    // Initialize the pool of transform worker threads
    ThreadPoolCreate(&decoder->worker_thread.pool,
                    cpus,
                    WorkerThreadProc,
                    decoder);
#endif
#endif

    // Set the frame dimensions and format
    SetDecoderFormat(decoder, width, height, format, resolution);

    // Allocate the data structure for decoding the samples
    AllocDecoderGroup(decoder);

    // Note that this code assumes that the samples to decode are groups
    // as opposed to isolated frames which are not supported in this code

    // Allocate a buffer for storing intermediate results during decoding
    if (!AllocDecoderBuffer(decoder, width, height, format)) {
        return false;
    }

    // Should check that the finite state machine tables were initialized
    // NOTE(review): this asserts flags < 0 -- it looks like a "still
    // uninitialized" sentinel check before InitFSM runs below; confirm
    // against the FSM table definition.
    assert(decoder->fsm[0].table.flags < 0);

    // Initialize the finite state machine for this decoder
    for(i=0; i<CODEC_NUM_CODESETS; i++)
    {
        InitFSM(&decoder->fsm[i], codesets[i].fsm_table);

#if _COMPANDING
        // Scale the values in the finite state machine entries for companding
        ScaleFSM(&decoder->fsm[i].table);
#endif
    }

    // Indicate that the decoder has been initialized
    decoder->state = DECODER_STATE_INITIALIZED;

#if (1 && DUMP)
    // Write the wavelet bands as images
    SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
    SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
    SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
//  SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
    SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);

    // Set this flag to enable output
    decoder->dump.enabled = true;
#endif

#if _TIMING
    // Initialize the global timers and counters
    InitTiming();
#endif

    //DAN20160203 Fix for a memory leak in InitCookbooks
    for (i = 0; i < CODEC_NUM_CODESETS; i++)
    {
#if _ALLOCATOR
        Free(allocator, codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
        Free(allocator, codesets[i].fastbook); codesets[i].fastbook = NULL;
        Free(allocator, codesets[i].valuebook); codesets[i].valuebook = NULL;
#else
        MEMORY_FREE(codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
        MEMORY_FREE(codesets[i].fastbook); codesets[i].fastbook = NULL;
        MEMORY_FREE(codesets[i].valuebook); codesets[i].valuebook = NULL;
#endif
    }

    // The decoder has been initialized successfully
    return true;
}
// Apply the CPU limit requested in the clip metadata and, when thread start
// is deferred (_DELAY_THREAD_START), create the entropy worker thread pool.
void DecodeEntropyInit(DECODER *decoder)
{
    int cpus = 1;

    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The CPU count is packed into the upper 16 bits of the capabilities
    cpus = decoder->thread_cntrl.capabilities >> 16;

    // Clamp the CPU count to the limit from the clip metadata (if any)
    if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
    {
        cpus = decoder->cfhddata.cpu_limit;
        decoder->thread_cntrl.limit = cpus;
        decoder->thread_cntrl.set_thread_params = 1;
        decoder->thread_cntrl.capabilities &= 0xffff;
        decoder->thread_cntrl.capabilities |= cpus<<16;
    }
    assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
    {
        // The entropy pool is capped at four threads
        int threads = cpus;
        if(threads > 4)
            threads = 4;

        CreateLock(&decoder->entropy_worker_new.lock);

        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                        threads,
                        EntropyWorkerThreadProc,
                        decoder);
    }
#endif
#endif
}
// Install (or clear) the metadata override block for this decoder.
// A non-zero overrideSize copies the data into decoder-owned storage; a
// zero size releases the current override along with the override-priority
// metadata databases.  Always returns true.
// NOTE(review): an allocation failure is silent -- the function still
// returns true with no override installed; confirm callers accept this
// best-effort behavior.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
    // Release any previously installed override data
    if(decoder->overrideData)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->overrideData);
#else
        MEMORY_FREE(decoder->overrideData);
#endif
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }

    if(overrideSize)
    {
#if _ALLOCATOR
        decoder->overrideData = Alloc(decoder->allocator, overrideSize);
#else
        decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
        if(decoder->overrideData)
        {
            memcpy(decoder->overrideData, overrideData, overrideSize);
            decoder->overrideSize = overrideSize;
        }
    }
    else
    {
        int i;
        for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++) //This was 0 to max but that cause right eye primary corrections(side-by-side) mode to flicker.
        // This database clearing was added but I don't know why.
        {
            if(decoder->DataBases[i])
            {
#if _ALLOCATOR
                Free(decoder->allocator, decoder->DataBases[i]);
#else
                MEMORY_FREE(decoder->DataBases[i]);
#endif
                decoder->DataBases[i] = NULL;
                decoder->DataBasesSize[i] = 0;
                decoder->DataBasesAllocSize[i] = 0;
            }
        }
    }

    return true;
}
// Return the transform for the specified channel in the group, allocating and
// zero-initializing a new TRANSFORM on first use.  Returns NULL if the
// channel number is out of range or the allocation fails.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
    //TODO:ALLOC Change this routine to take an allocator as the first argument
    ALLOCATOR *allocator = NULL;
#endif
    TRANSFORM *transform;

    // Channel zero is a special case because it may mean
    // that the group header has not been decoded yet
    if (channel != 0)
    {
        // Make sure that the channel number is in range
        assert(0 <= channel && channel < group->header.num_channels);
        if (!(0 <= channel && channel < group->header.num_channels))
            return NULL;
    }

    transform = group->transform[channel];

    // Need to allocate a transform data structure?
    if (transform == NULL) {
#if _ALLOCATOR
        transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
        transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
        assert(transform != NULL);
        if (transform == NULL) return NULL;

        // Start from a known state and remember the transform in the group
        memset(transform, 0, sizeof(TRANSFORM));
        group->transform[channel] = transform;

#if _TIMING
        alloc_transform_count++;
#endif
    }

    return transform;
}
//extern FILE *logfile;
// Fill an output frame buffer with "black" for the given pixel format.
//
// buffer - output frame buffer (the fill covers height * pitch bytes)
// width  - frame width in pixels (unused; the fill is pitch-based)
// height - number of rows
// pitch  - bytes per row
// format - decoded pixel format that selects the black byte pattern
//
// Bug fix: the previous implementation passed the four-byte pattern to
// memset(), which converts its fill value to unsigned char and therefore
// uses only the low-order byte — the alternating luma/chroma pattern for
// YUYV was never actually written.  The buffer is now filled by repeating
// the full four-byte pattern (with a memset fast path when all four bytes
// are equal, which also covers the default all-zero case).
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format)
{
    size_t size = height * pitch;
    union {
        uint8_t byte[4];
        uint32_t word;
    } output;

    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        // Black luma with neutral chroma in YUYV byte order
        output.byte[0] = COLOR_LUMA_BLACK;
        output.byte[1] = COLOR_CHROMA_ZERO;
        output.byte[2] = COLOR_LUMA_BLACK;
        output.byte[3] = COLOR_CHROMA_ZERO;
        break;

    default:
        //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format);
        //assert(0);
        output.word = 0;
        break;
    }

    if (output.byte[0] == output.byte[1] &&
        output.byte[0] == output.byte[2] &&
        output.byte[0] == output.byte[3])
    {
        // All four bytes are identical so a plain memset is sufficient
        memset(buffer, output.byte[0], size);
    }
    else
    {
        // Repeat the four-byte pattern across the entire buffer
        // (byte-wise to avoid any alignment assumptions about buffer)
        size_t i;
        for (i = 0; i < size; i++) {
            buffer[i] = output.byte[i & 3];
        }
    }
}
// Decode the coefficients in a subband
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband);
// Decode the coefficients in a lowpass band
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet);
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading);
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band);
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
// Decode a sample channel header
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input);
// Apply the inverse horizontal-temporal transform to reconstruct the output frame
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
uint8_t *frame1, uint8_t *frame2, int output_pitch,
FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision);
#endif
// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Return true if the rest of the channel does not have to be decoded.
//
// For half and quarter resolution the answer depends on which subbands have
// already been decoded; the required subband set differs between the
// field-plus and spatial transform types but the resolution logic is
// otherwise identical, so the masks are selected first and the switch is
// shared (the previous version duplicated the entire switch in each branch).
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int transform_type = transform->type;

    // Subband masks for half- and quarter-resolution decoding
    uint32_t half_mask;
    uint32_t quarter_mask;

    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        half_mask = DECODED_SUBBAND_MASK_HALF;
        quarter_mask = DECODED_SUBBAND_MASK_QUARTER;
    }
    else
    {
        // Only the spatial transform is expected here
        assert(transform_type == TRANSFORM_TYPE_SPATIAL);
        half_mask = 0x7F;
        quarter_mask = 0x0F;
    }

    switch (resolution)
    {
    case DECODED_RESOLUTION_HALF:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & half_mask) == half_mask);
        break;

    case DECODED_RESOLUTION_QUARTER:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & quarter_mask) == quarter_mask);
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        // Only the lowpass subband (bit zero) is required
        return (codec->decoded_subband_flags & 1);

    default:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
        {
            if (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
            {
                // If we are requesting a YUV decode we don't need the 4th channel
                if (codec->channel == 3)
                {
                    return true;
                }
            }
        }
        break;
    }

    // Cannot skip the rest of the channel
    return false;
}
#if 0
static bool CanSkipSubband(DECODER *decoder, int subband)
{
// Bitmask indicates which subbands must be decoded for quarter resolution
static uint32_t quarter_resolution_mask = 0x008F;
// Convert the subband number into a bitmask (could use a lookup table)
uint32_t subband_mask = SUBBAND_MASK(subband);
// Select the resolution of the fully decoded frames
int resolution = decoder->frame.resolution;
switch (resolution)
{
case DECODED_RESOLUTION_QUARTER:
//if (4 <= subband && subband <= 6)
if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
if ((subband_mask & quarter_resolution_mask) == 0) {
return true;
}
}
break;
default:
// Assume that the subband must be decoded
break;
}
return false;
}
#endif
// Return true if the wavelet exists and every band has been decoded
static bool AllBandsValid(IMAGE *wavelet)
{
    if (wavelet == NULL) {
        return false;
    }
    return BANDS_ALL_VALID(wavelet);
}
#if DEBUG
// Debug check: every channel's wavelet at frame_index has all bands valid.
// Asserts and returns false when the channel count or frame index is out of
// the supported range.
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int i;

    // Validate the argument ranges before touching the transform array
    if (num_channels < 1 || num_channels > TRANSFORM_MAX_CHANNELS) {
        assert(0);
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        assert(0);
        return false;
    }

    // Fail as soon as one channel has an invalid (or missing) wavelet
    for (i = 0; i < num_channels; i++)
    {
        if (!AllBandsValid(transform_array[i]->wavelet[frame_index])) {
            return false;
        }
    }

    // All wavelet bands in all channels are valid
    return true;
}
// Debug check: every channel's wavelet at frame_index has a valid lowpass
// band (band zero).  Returns false for out-of-range arguments.
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int i;

    if (num_channels < 1 || num_channels > TRANSFORM_MAX_CHANNELS) {
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES) {
        return false;
    }

    for (i = 0; i < num_channels; i++)
    {
        IMAGE *wavelet = transform_array[i]->wavelet[frame_index];

        // The wavelet must exist and its lowpass band must be decoded
        if (wavelet == NULL) {
            return false;
        }
        if ((wavelet->band_valid_flags & BAND_VALID_MASK(0)) == 0) {
            return false;
        }
    }

    // All lowpass bands in all channels are valid
    return true;
}
#endif
// Derive the frame dimensions from the dimensions of the smallest (first)
// wavelet.  Both supported transform types use the same scale factor of 8
// per dimension.  Returns false (without writing the outputs) for an
// unknown transform type.
static bool
ComputeFrameDimensionsFromFirstWavelet(int transform_type,
                                       int first_wavelet_width,
                                       int first_wavelet_height,
                                       int *frame_width_out,
                                       int *frame_height_out)
{
    // Scale factor from the first wavelet up to the full frame
    const int scale = 8;

    switch (transform_type)
    {
    case TRANSFORM_TYPE_SPATIAL:
    case TRANSFORM_TYPE_FIELDPLUS:
        // Return the frame dimensions
        *frame_width_out = first_wavelet_width * scale;
        *frame_height_out = first_wavelet_height * scale;
        return true;

    default:
        // Unknown transform type
        assert(0);
        return false;
    }
}
// Decode the sample header to determine the type of sample and other parameters.
//
// On success the header is filled in with the frame dimensions, input and
// encoded pixel formats, frame number, interlace/progressive flags, and
// thumbnail offsets parsed from the bitstream.  The caller passes search
// flags in header->find_lowpass_bands (bit 0: find lowpass bands, bit 1:
// find uncompressed, bit 2: header info only) and the current video channel
// in header->videoChannels; both are captured before the header is cleared.
//
// Fixes relative to the previous version:
//  - the header pointer was dereferenced (videoChannels, find_lowpass_bands)
//    before the NULL check; the check now comes first
//  - channel_count could be read uninitialized (in the encoded-format fixups)
//    when the bitstream contained no group index; it now starts at zero
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
    TAGVALUE segment;
    int sample_type;
    int sample_size = 0;

    // Group index
    uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

    // Number of channels in the group index (zero until the index is seen)
    int channel_count = 0;

    // Values used for computing the frame width and height (if necessary)
    int transform_type = -1;
    int first_wavelet_width = 0;
    int first_wavelet_height = 0;
    int display_height = 0;
    int current_channel = 0;

    // Caller-supplied search parameters (captured below)
    int currentVideoChannel;
    int find_lowpass_bands;
    int find_uncompressed;
    int find_header_info_only;

    // Validate the output argument before dereferencing it
    if (header == NULL) {
        return false;
    }

    // Capture the caller-supplied search flags before the header is cleared
    currentVideoChannel = header->videoChannels;
    find_lowpass_bands = header->find_lowpass_bands & 1;
    find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0;
    find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0;

    if (currentVideoChannel == 0)
        currentVideoChannel = 1;

    // Clear the entire sample header to prevent early return from this routine
    memset(header, 0, sizeof(SAMPLE_HEADER));

    // Clear the error code
    header->error = CODEC_ERROR_OKAY;

    // Initialize the frame dimensions to unknown
    header->width = 0;
    header->height = 0;
    header->videoChannels = 1;

    // Initialize the original pixel format to unknown
    header->input_format = COLOR_FORMAT_UNKNOWN;

    // Initialize the encoded format to unknown
    header->encoded_format = ENCODED_FORMAT_UNKNOWN;

    // Clear the frame number in case it is not present in the sample
    header->frame_number = 0;

    // The video is not progressive if the sample flags are not present
    header->hdr_progressive = false;

#if _BITSTREAM_UNALIGNED
    // Record the alignment of the bitstream within the sample
    SetBitstreamAlignment(input, 0);
#endif

    sample_size = input->nWordsUsed;

    // Get the type of sample (should be the first tag value pair)
    segment = GetTagValue(input);
    assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
    if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
        header->error = CodecErrorBitstream(input);
        return false;
    }
    sample_type = segment.tuple.value;

    switch (sample_type)
    {
    case SAMPLE_TYPE_GROUP:     // Group of frames
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = false;
        break;

    case SAMPLE_TYPE_FRAME:     // The second or later frame in a group
        header->key_frame = false;
        header->difference_frame = true;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_IFRAME:    // One frame in the group
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_SEQUENCE_HEADER:
        // Treat the video sequence header like a keyframe that can be dropped
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    default:
        // Unknown type of sample
        header->error = CODEC_ERROR_SAMPLE_TYPE;
        return false;
    }

    // Continue parsing the sample header until all of the information has been found
    while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
            (find_uncompressed == 1 && current_channel < 1) ||
            display_height == 0 ||
            header->width == 0 ||
            header->height == 0 ||
            header->input_format == COLOR_FORMAT_UNKNOWN ||
            header->frame_number == 0 ||
            (header->interlaced_flags == 0 && header->hdr_progressive == 0))
    {
        int chunksize = 0;

        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did the bitstream end before the last tag was found?
        if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
            break;
        }

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            header->error = CodecErrorBitstream(input);
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0) {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Extract the chunk size from the sized tags (24-bit and 16-bit forms)
        if (segment.tuple.tag & 0x2000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
            chunksize += ((segment.tuple.tag & 0xff) << 16);
        }
        else if (segment.tuple.tag & 0x4000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
        }
        // else if(tag == CODEC_TAG_INDEX) // handled below
        // {
        //     chunksize = value;
        //     chunksize &= 0xffff;
        // }
        else
        {
            chunksize = 0;
        }

        if ((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
        {
            int skip = 1;

            if ((segment.tuple.tag & 0xff00) == 0x2200) //sample size
            {
                if (sample_size < chunksize*4)
                    find_header_info_only = 1;
                skip = find_header_info_only;

                // For stereo (two video channels) parse the second eye to
                // find its thumbnail offsets
                if (currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
                {
                    BITSTREAM input2;
                    SAMPLE_HEADER header2;
                    BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
                    int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
                    int eye_sample_size = input->nWordsUsed - eye_offset;

                    // Search for first sample of the next frame
                    while ((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
                    {
                        eye2 += 4;
                        chunksize++;
                        eye_offset += 4;
                        eye_sample_size -= 4;
                    }

                    // Save the offset to the right stereo sample
                    header->left_sample_size = eye_offset;

                    {
                        InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);
                        memset(&header2, 0, sizeof(SAMPLE_HEADER));
                        header2.find_lowpass_bands = 1;
                        currentVideoChannel++;
                        header2.videoChannels = currentVideoChannel;

                        if (ParseSampleHeader(&input2, &header2))
                        {
                            int i;
                            for (i = 0; i < 4; i++)
                            {
                                if (header2.thumbnail_channel_offsets[i])
                                    header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
                            }
                        }
                    }
                }
            }

            if ((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                header->hdr_uncompressed = 1;
                skip = 1;
                if (find_lowpass_bands != 1)
                    break;
            }

            if ((segment.tuple.tag & 0xff00) == 0x2100) //level
            {
                if (find_lowpass_bands == 1)
                {
                    skip = 0;
                }
                else
                {
                    skip = 1; // no header data after the fix level
                    break;
                }
            }

            if (chunksize)
            {
                if (skip)
                {
                    // Skip over the sized chunk
                    input->lpCurrentWord += chunksize*4;
                    input->nWordsUsed -= chunksize*4;
                }
            }
            else
            {
                switch (segment.tuple.tag)
                {
                case CODEC_TAG_VERSION:     // Version number of the encoder used in each GOP.
                    header->encoder_version = (((segment.tuple.value >> 12) & 0xf) << 16) |
                                              (((segment.tuple.value >> 8) & 0xf) << 8) |
                                              ((segment.tuple.value) & 0xff);
                    break;

                case CODEC_TAG_INDEX:
                    // Get the number of channels in the index to skip
                    channel_count = segment.tuple.value;
                    DecodeGroupIndex(input, (uint32_t *)&channel_size[0], channel_count);
                    break;

                case CODEC_TAG_FRAME_WIDTH:
                    // Record the frame width in the sample header
                    header->width = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_HEIGHT:
                    // Record the frame height in the sample header
                    header->height = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
                    display_height = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_WIDTH:
                    // Save the width of the smallest wavelet for computing the frame dimensions
                    first_wavelet_width = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_HEIGHT:
                    // Save the height of the smallest wavelet for computing the frame dimensions
                    first_wavelet_height = segment.tuple.value;
                    break;

                case CODEC_TAG_TRANSFORM_TYPE:
                    // Save the type of transform for computing the frame dimensions (if necessary)
                    transform_type = segment.tuple.value;
                    break;

                case CODEC_TAG_INPUT_FORMAT:
                    // Record the original format of the encoded frames
                    header->input_format = (COLOR_FORMAT)segment.tuple.value;
                    break;

                case CODEC_TAG_ENCODED_FORMAT:
                case CODEC_TAG_OLD_ENCODED_FORMAT:
                    // Record the encoded format (internal representation)
                    header->encoded_format = (ENCODED_FORMAT)segment.tuple.value;
                    // A three-channel group index implies RGB 4:4:4 rather than RGBA
                    if (header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
                        header->encoded_format = ENCODED_FORMAT_RGB_444;
                    break;

                case CODEC_TAG_FRAME_NUMBER:
                    // Record the frame number for debugging
                    header->frame_number = segment.tuple.value;
                    break;

                case CODEC_TAG_INTERLACED_FLAGS:
                    // Record the flags that indicate the field type
                    header->interlaced_flags = segment.tuple.value;
                    break;

                case CODEC_TAG_SAMPLE_FLAGS:
                    // The sample flags specify progressive versus interlaced decoding
                    header->hdr_progressive = !!(segment.tuple.value & SAMPLE_FLAGS_PROGRESSIVE);
                    if (header->hdr_progressive) {
                        // Clear the interlaced flags
                        header->interlaced_flags = 0;
                    }
                    break;

                case CODEC_TAG_LOWPASS_SUBBAND:
                    if (segment.tuple.value == 0) // low pass band
                    {
                        // Scan ahead for the lowpass band marker to record
                        // the thumbnail offset for this channel
                        int count = 8;
                        uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
                        do
                        {
                            uint32_t longword = SwapInt32(lptr[count]);
                            unsigned short t,v;
                            t = (longword >> 16) & 0xffff;
                            v = (longword) & 0xffff;
                            if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
                            {
                                header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
                                break;
                            }
                            count++;
                        } while (count < 32);

                        current_channel++;
                    }
                    break;

                case CODEC_TAG_ENCODED_CHANNELS:
                    if (header->videoChannels == 1)
                    {
                        header->videoChannels = segment.tuple.value;
                        if (header->videoChannels < 1)
                            header->videoChannels = 1;
                    }
                    break;

                case CODEC_TAG_QUALITY_L: //
                    header->encode_quality &= 0xffff0000;
                    header->encode_quality |= segment.tuple.value;
                    break;

                case CODEC_TAG_QUALITY_H: //
                    header->encode_quality &= 0xffff;
                    header->encode_quality |= segment.tuple.value << 16;
                    break;
                }

                // Have the encoded frame dimensions been computed?
                if (header->width == 0 || header->height == 0)
                {
                    // Found the first wavelet in the bitstream?
                    if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
                    {
                        // The group header did not contain tags for the frame dimensions
                        // prior to the release of support for RGB 4:4:4, so must attempt to
                        // compute the frame dimensions from the dimensions of the lowpass band.
                        int frame_width = 0;
                        int frame_height = 0;

                        // Use the dimensions of the first wavelet to compute the frame width and height
                        if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
                                                                    first_wavelet_width,
                                                                    first_wavelet_height,
                                                                    &frame_width,
                                                                    &frame_height)) {
                            // Could not compute the frame dimensions
                            header->error = CODEC_ERROR_FRAME_DIMENSIONS;
                            return false;
                        }

                        // Save the frame dimensions in the sample header
                        header->width = frame_width;
                        header->height = frame_height;

                        // No more header information after finding the lowpass band
                        break;
                    }
                }

                if (find_lowpass_bands != 1 && find_uncompressed != 1)
                {
                    // No more header information after the first encoded band
                    if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the frame index
                    if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the lowpass band header
                    if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH)
                    {
                        // Stop looking for header information
                        break;
                    }
                }
            }
        }
    }

    if (header->width == 0 || header->height == 0) {
        assert(0);
    }

    // Fill in the encoded format if it was not present in the header
    if (header->encoded_format == ENCODED_FORMAT_UNKNOWN) {
        header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
    }

    if (display_height > 0) {
        header->height = display_height;
    }

    if (header->encoded_format == ENCODED_FORMAT_BAYER)
    {
        // Bayer samples are encoded at half size in each dimension
        header->width *= 2;
        header->height *= 2;

        if (display_height == 0)
        {
            if (header->height == 1088)
                header->height = 1080;
        }
    }

    // Return true if the header was parsed completely and correctly
    return (header->width > 0 &&
            header->height > 0 &&
            ((sample_type == SAMPLE_TYPE_FRAME) ||
             (header->input_format != COLOR_FORMAT_UNKNOWN &&
              header->encoded_format != ENCODED_FORMAT_UNKNOWN)));

    // It is not an error if the frame number was not found in the sample header
}
// Print the tag/value pairs at the start of a sample to the log file for
// debugging.  Parsing stops once a lowpass dimension has been seen.
//
// NOTE(review): the loop condition uses &&, so the loop exits as soon as
// EITHER the lowpass width or the lowpass height becomes nonzero; if the two
// tags arrive in separate segments only the first one is printed.  Confirm
// whether `||` (parse until both are known) was intended.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
    TAGVALUE segment;
    int lowpass_width = 0;
    int lowpass_height = 0;

    // Parse the sample header until the lowpass band is found
    while (lowpass_width == 0 && lowpass_height == 0)
    {
        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY) {
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0) {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Check that the tag is valid
        assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED);

        switch (segment.tuple.tag)
        {
        case CODEC_TAG_SAMPLE:
            fprintf(logfile, "Sample type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_WIDTH:
            fprintf(logfile, "Frame width: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_HEIGHT:
            fprintf(logfile, "Frame height: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_LOWPASS_WIDTH:
            lowpass_width = segment.tuple.value;
            fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
            break;

        case CODEC_TAG_LOWPASS_HEIGHT:
            lowpass_height = segment.tuple.value;
            fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
            break;

        case CODEC_TAG_TRANSFORM_TYPE:
            fprintf(logfile, "Transform type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_INPUT_FORMAT:
            fprintf(logfile, "Input format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_ENCODED_FORMAT:
        case CODEC_TAG_OLD_ENCODED_FORMAT:
            fprintf(logfile, "Encoded format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_NUMBER:
            fprintf(logfile, "Frame number: %d\n", segment.tuple.value);
            break;
        }
    }

    return true;
}
// Advance the bitstream past the current video channel so decoding can
// continue with the next channel of a multi-channel (3D/stereo) sample.
// Returns the number of encoded video channels found near the start of the
// sample (or decoder->real_channels for small samples that carry no index).
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
    TAGWORD tag,value=1;
    unsigned char *pos = NULL;
    int readsize = input->nWordsUsed;

    if (readsize > 4096) // only need to scan the first few tuplets
    {
        readsize = 4096;
    }
    else
    {
        //Tiny therefore P-frame, nothing to be read so:
        value = decoder->real_channels; // return the last value.
        return value;
    }

    // Locate the encoded-channels tuplet near the start of the sample
    pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);
    if (pos && value > 1 && skip_to_channel > 1)
    {
        int chunksize = 0;
        intptr_t offset;
        int count = 0;

        // Scan forward (at most 10 tuplets) for the sample-size tag
        do
        {
            tag = *pos++ << 8;
            tag |= *pos++;
            value = *pos++ << 8;
            value |= *pos++;
            if (tag < 0)
            {
                tag = NEG(tag);
            }
        } while ((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

        if ((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
        {
            // Assemble the 24-bit chunk size (low 8 bits of the tag are the
            // high byte) and skip past the current channel's sample data
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag & 0xff) << 16);

            offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;
            input->lpCurrentWord += offset;
            input->nWordsUsed -= (int)offset;

            {
                // NOTE(review): this byte pointer shadows the outer TAGWORD tag
                uint8_t *tag = (uint8_t *)input->lpCurrentWord;

                // Search for first sample of the next frame
                while ((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
                {
                    input->lpCurrentWord += 4;
                    input->nWordsUsed -= 4;
                    tag += 4;
                }
            }
        }
    }

    //if(value == 0) value = 1; // old non-stereo file
    return value;
}
// Number of subpixel phases per whole pixel for the vertical resampler below
#define SUBPIXEL 64

// Four-tap filter weights, one row per subpixel phase (SUBPIXEL+1 rows so
// both endpoint phases are present).  Entries are stored scaled by 128;
// 0x7fff appears to stand in for the saturated full-weight tap (256*128
// would overflow a signed short) — confirm against the consuming code.
static short gains[SUBPIXEL+1][4] = {
    {0*128,0*128,0x7fff,0*128},
    {0*128,2*128,0x7fff,-2*128},
    {0*128,5*128,255*128,-4*128},
    {0*128,8*128,254*128,-6*128},
    {0*128,11*128,253*128,-8*128},
    {0*128,14*128,252*128,-10*128},
    {0*128,18*128,250*128,-12*128},
    {0*128,21*128,248*128,-13*128},
    {-1*128,25*128,247*128,-15*128},
    {-1*128,29*128,244*128,-16*128},
    {-1*128,33*128,241*128,-17*128},
    {-2*128,37*128,239*128,-18*128},
    {-2*128,41*128,236*128,-19*128},
    {-3*128,46*128,233*128,-20*128},
    {-3*128,50*128,229*128,-20*128},
    {-4*128,55*128,226*128,-21*128},
    {-4*128,60*128,221*128,-21*128},
    {-5*128,65*128,217*128,-21*128},
    {-5*128,70*128,213*128,-22*128},
    {-6*128,75*128,209*128,-22*128},
    {-7*128,80*128,205*128,-22*128},
    {-7*128,85*128,199*128,-21*128},
    {-8*128,91*128,194*128,-21*128},
    {-9*128,96*128,190*128,-21*128},
    {-10*128,102*128,185*128,-21*128},
    {-10*128,107*128,179*128,-20*128},
    {-11*128,113*128,174*128,-20*128},
    {-12*128,118*128,169*128,-19*128},
    {-13*128,124*128,164*128,-19*128},
    {-14*128,129*128,159*128,-18*128},
    {-14*128,135*128,152*128,-17*128},
    {-15*128,141*128,147*128,-17*128},
    {-16*128,144*128,144*128,-16*128},
    {-17*128,147*128,141*128,-15*128},
    {-17*128,152*128,135*128,-14*128},
    {-18*128,159*128,129*128,-14*128},
    {-19*128,164*128,124*128,-13*128},
    {-19*128,169*128,118*128,-12*128},
    {-20*128,174*128,113*128,-11*128},
    {-20*128,179*128,107*128,-10*128},
    {-21*128,185*128,102*128,-10*128},
    {-21*128,190*128,96*128,-9*128},
    {-21*128,194*128,91*128,-8*128},
    {-21*128,199*128,85*128,-7*128},
    {-22*128,205*128,80*128,-7*128},
    {-22*128,209*128,75*128,-6*128},
    {-22*128,213*128,70*128,-5*128},
    {-21*128,217*128,65*128,-5*128},
    {-21*128,221*128,60*128,-4*128},
    {-21*128,226*128,55*128,-4*128},
    {-20*128,229*128,50*128,-3*128},
    {-20*128,233*128,46*128,-3*128},
    {-19*128,236*128,41*128,-2*128},
    {-18*128,239*128,37*128,-2*128},
    {-17*128,241*128,33*128,-1*128},
    {-16*128,244*128,29*128,-1*128},
    {-15*128,247*128,25*128,-1*128},
    {-13*128,248*128,21*128,0*128},
    {-12*128,250*128,18*128,0*128},
    {-10*128,252*128,14*128,0*128},
    {-8*128,253*128,11*128,0*128},
    {-6*128,254*128,8*128,0*128},
    {-4*128,255*128,5*128,0*128},
    {-2*128,0x7fff,2*128,0*128},
    {0*128,0*128,0x7fff,0*128}
};
// 256-entry filter kernel in Q15-style fixed point; the peak value 32767
// sits at index 128 and the table is (nearly) symmetric about it.  The name
// suggests a Lanczos window, but the exact design is not verifiable from
// this file.
static int lanczos[256] =
{
    0,
    -2,
    -8,
    -18,
    -33,
    -53,
    -77,
    -106,
    -141,
    -179,
    -223,
    -272,
    -325,
    -384,
    -447,
    -514,
    -586,
    -662,
    -742,
    -826,
    -913,
    -1004,
    -1097,
    -1193,
    -1290,
    -1389,
    -1490,
    -1591,
    -1692,
    -1792,
    -1892,
    -1990,
    -2086,
    -2179,
    -2269,
    -2355,
    -2436,
    -2511,
    -2580,
    -2643,
    -2697,
    -2744,
    -2781,
    -2809,
    -2826,
    -2832,
    -2826,
    -2808,
    -2776,
    -2730,
    -2670,
    -2594,
    -2503,
    -2395,
    -2271,
    -2129,
    -1969,
    -1790,
    -1593,
    -1377,
    -1141,
    -886,
    -611,
    -315,
    0,
    336,
    692,
    1069,
    1466,
    1884,
    2321,
    2778,
    3255,
    3750,
    4265,
    4797,
    5347,
    5914,
    6498,
    7097,
    7711,
    8340,
    8982,
    9636,
    10301,
    10977,
    11663,
    12357,
    13058,
    13765,
    14477,
    15192,
    15910,
    16630,
    17349,
    18066,
    18781, // NOTE(review): mirror entry (index 160) is 18159, not 18781 —
           // the table is not perfectly symmetric here; confirm intent
    18871,
    19580,
    20285,
    20986,
    21678,
    22361,
    23035,
    23697,
    24348,
    24983,
    25604,
    26206,
    26790,
    27354,
    27898,
    28419,
    28915,
    29387,
    29832,
    30249,
    30638,
    30997,
    31326,
    31623,
    31886,
    32117,
    32314,
    32476,
    32603,
    32695,
    32749,
    32767, //was 32768, issue for SSE2
    32749,
    32695,
    32603,
    32476,
    32314,
    32117,
    31886,
    31623,
    31326,
    30997,
    30638,
    30249,
    29832,
    29387,
    28915,
    28419,
    27898,
    27354,
    26790,
    26206,
    25604,
    24983,
    24348,
    23697,
    23035,
    22361,
    21678,
    20986,
    20285,
    19580,
    18871,
    18159, // NOTE(review): see the note at index 96 (value 18781)
    18066,
    17349,
    16630,
    15910,
    15192,
    14477,
    13765,
    13058,
    12357,
    11663,
    10977,
    10301,
    9636,
    8982,
    8340,
    7711,
    7097,
    6498,
    5914,
    5347,
    4797,
    4265,
    3750,
    3255,
    2778,
    2321,
    1884,
    1466,
    1069,
    692,
    336,
    0,
    -315,
    -611,
    -886,
    -1141,
    -1377,
    -1593,
    -1790,
    -1969,
    -2129,
    -2271,
    -2395,
    -2503,
    -2594,
    -2670,
    -2730,
    -2776,
    -2808,
    -2826,
    -2832,
    -2826,
    -2809,
    -2781,
    -2744,
    -2697,
    -2643,
    -2580,
    -2511,
    -2436,
    -2355,
    -2269,
    -2179,
    -2086,
    -1990,
    -1892,
    -1792,
    -1692,
    -1591,
    -1490,
    -1389,
    -1290,
    -1193,
    -1097,
    -1004,
    -913,
    -826,
    -742,
    -662,
    -586,
    -514,
    -447,
    -384,
    -325,
    -272,
    -223,
    -179,
    -141,
    -106,
    -77,
    -53,
    -33,
    -18,
    -8,
    -2,
};
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset,
float zoom)
{
float yposf,ystepf;
int x;
//int endofSSEline = 0;
unsigned short *scanline[4];
//int spitch = pitch/2;
int neg = 0,step;
__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
__m128i *lineA, *lineB, *lineC, *lineD, *outline128;
offset = -offset;
yposf = height * offset;
yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
ystepf = 1.0f/zoom;
if(yposf < 0.0)
neg = 1;
if(pitch < 0)
yposf -= ystepf;
/* yposi = floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
*/
// -3 , 0 best small notch at zero?
//
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
step = 16;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
step = 32;
break;
}
{
static char zeroline[1024] = {0};
int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
unsigned char *src = (unsigned char *)RGB48;
unsigned char *dst = (unsigned char *)RGB48;
unsigned char *ptr = (unsigned char *)buffer;
if(yoffset < 0) yoffset = 0;
if(yend > height) yend = height;
src += pitch * yoffset;
for(y=yoffset; y<yend; y++)
{
memcpy(ptr, src, widthbytes);
ptr += widthbytes;
src += pitch;
}
ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
int i,t,yp = ((int)yposf);
int rmdr = 63-((int)(yposf*64.0) & 63);
int gains[4];
yp -= 1; // use -2 cause a image down shift //DAN20100225
t = 0;
for(i=0; i<4; i++)
{
if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)zeroline;
}
else
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
}
yp++;
rmdr+=64;
}
if(t)
{
__m128i half;
gA = _mm_set1_epi16(gains[0]);
gB = _mm_set1_epi16(gains[1]);
gC = _mm_set1_epi16(gains[2]);
gD = _mm_set1_epi16(gains[3]);
outline128 = (__m128i *)dst;
lineA = (__m128i *)scanline[0];
lineB = (__m128i *)scanline[1];
lineC = (__m128i *)scanline[2];
lineD = (__m128i *)scanline[3];
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
for(x=0;x<widthbytes; x+=step)
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
_mm_storeu_si128(outline128++, o128);
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
_mm_storeu_si128(outline128++, o128);
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
for(x=0;x<widthbytes; x+=step)
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
_mm_storeu_si128(outline128++, o128);
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
_mm_storeu_si128(outline128++, o128);
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
for(x=0;x<widthbytes; x+=step)
{
lA = _mm_loadu_si128(lineA);
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB);
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC);
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD);
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
half = o128;
lA = _mm_loadu_si128(lineA++);
lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB++);
lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC++);
lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD++);
lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
half = _mm_srli_epi16(half,8);
o128 = _mm_srli_epi16(o128,8);
o128 = _mm_packus_epi16(o128, half);
_mm_storeu_si128(outline128++, o128);
}
break;
}
}
else
{
if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
{
memset(dst, 0x10801080, widthbytes);
}
else
{
memset(dst, 0, widthbytes);
}
}
yposf += ystepf;
dst += pitch;
}
/*ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
int r,g,b,yp = ((int)yposf);
yposf += ystepf;
if(yp<0 || yp>= height)
{
memset(dst, 0, widthbytes);
}
else
{
memcpy(dst, &ptr[widthbytes*yp], widthbytes);
}
dst += pitch;
}*/
}
}
/*
 * RGB48VerticalShiftZoomFine
 *
 * Vertically shifts and zooms image data in place using a 4-tap Lanczos
 * filter (the lanczos[] table holds 64 sub-pixel phases per tap).  The
 * needed source rows are first copied into 'buffer'; each output row is
 * then resampled from four cached neighbor rows.
 *
 * Unlike the SSE-wide variant above, each output row here produces only a
 * single pixel group (3-4 components) -- there is no horizontal loop over
 * widthbytes.  NOTE(review): this routine therefore appears to operate on
 * a narrow strip (presumably one pixel column), with widthbytes being the
 * strip's byte width; confirm against callers.
 *
 * decoder    - supplies StereoBufferFormat, selecting the pixel layout
 * RGB48      - strip base pointer (source and destination)
 * buffer     - scratch cache for the source rows
 * widthbytes - bytes per row of the strip
 * height     - rows in the image
 * pitch      - bytes between successive rows
 * offset     - vertical shift as a fraction of the height (negated below)
 * zoom       - magnification factor (1.0 = none)
 * xx         - NOTE(review): unused in this function -- confirm intent
 */
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset,
float zoom, int xx)
{
float yposf,ystepf;
//int endofSSEline = 0;
unsigned short *scanline[4];
//int spitch = pitch/2;
int neg = 0,step;   // NOTE(review): 'neg' and 'step' are computed below but never read afterwards
__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
uint8_t *outlinePos8;
uint16_t *outlinePos16;
offset = -offset;
//yposf = height * offset;
// Start position centers the zoom window, then applies the (negated) shift.
yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
ystepf = 1.0f/zoom;
if(yposf < 0.0)
neg = 1;
if(pitch < 0)
yposf -= ystepf;
/* yposi = floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
*/
// -3 , 0 best small notch at zero?
//
// Bytes per pixel group for each buffer format (see NOTE above: unused).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
step = 4;
break;
case DECODED_FORMAT_RGB24:
step = 3;
break;
case DECODED_FORMAT_YUYV:
step = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
step = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
step = 6;
break;
default:
assert(0);
break;
}
{
// All-zero source row substituted for taps that fall outside the image.
static char zeroline[1024] = {0};
// [yoffset, yend) is the range of source rows the filter can touch.
int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
unsigned char *src = (unsigned char *)RGB48;
unsigned char *dst = (unsigned char *)RGB48;
unsigned char *ptr = (unsigned char *)buffer;
if(yoffset < 0) yoffset = 0;
if(yend > height) yend = height;
src += pitch * yoffset;
// Cache the needed source rows contiguously in 'buffer' (the in-place
// writes below would otherwise clobber rows still needed as input).
for(y=yoffset; y<yend; y++)
{
memcpy(ptr, src, widthbytes);
ptr += widthbytes;
src += pitch;
}
ptr = (unsigned char *)buffer;
for(y=0;y<height; y++)
{
int i,t,yp = ((int)yposf);
// rmdr indexes the 64-phase Lanczos table for the fractional position.
int rmdr = 63-((int)(yposf*64.0) & 63);
int gains[4];
yp -= 1; // use -2 cause a image down shift //DAN20100225
t = 0;
// Gather the four tap weights; taps outside the image read zeroline.
// 't' accumulates the total weight so an all-outside row can be blanked.
for(i=0; i<4; i++)
{
if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)zeroline;
}
else
{
t += gains[i] = lanczos[rmdr];
scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
}
yp++;
rmdr+=64;
}
if(t)
{
gA = _mm_set1_epi16(gains[0]);
gB = _mm_set1_epi16(gains[1]);
gC = _mm_set1_epi16(gains[2]);
gD = _mm_set1_epi16(gains[3]);
outlinePos8 = (uint8_t *)dst;
outlinePos16 = (uint16_t *)dst;
lineAPos = (uint8_t *)scanline[0];
lineBPos = (uint8_t *)scanline[1];
lineCPos = (uint8_t *)scanline[2];
lineDPos = (uint8_t *)scanline[3];
// Each case filters one pixel group and stores it via per-component
// extracts (a full 16-byte store would overrun the narrow strip).
// The adds/subs pair below is a saturation idiom: it clamps the
// accumulated value to the working range before the final shift.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_WP13:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RG64:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16[3] = _mm_extract_epi16(o128, 3);
outlinePos16+=4;
break;
case DECODED_FORMAT_RG48:
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
//_mm_storeu_si128((__m128i *)outlinePos, o128);
outlinePos16[0] = _mm_extract_epi16(o128, 0);
outlinePos16[1] = _mm_extract_epi16(o128, 1);
outlinePos16[2] = _mm_extract_epi16(o128, 2);
outlinePos16+=3;
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_YUYV:
// 8-bit formats: widen bytes to the high half of 16-bit lanes,
// filter at 13-bit precision, then narrow back to bytes.
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4);
outlinePos8[0] = _mm_extract_epi16(o128, 0);
outlinePos8[1] = _mm_extract_epi16(o128, 1);
outlinePos8[2] = _mm_extract_epi16(o128, 2);
outlinePos8[3] = _mm_extract_epi16(o128, 3);
outlinePos8+=4;
break;
case DECODED_FORMAT_RGB24:
{
// Scalar path: 3-byte pixels cannot be loaded 16 bytes at a time
// without overrunning the strip (see the retained code below).
// gains[] are 15-bit weights; >>7 keeps each product in 16 bits,
// yielding a 16-bit result whose top byte is stored.
int r,g,b;
b = ((lineAPos[0] * gains[0])>>7) +
((lineBPos[0] * gains[1])>>7) +
((lineCPos[0] * gains[2])>>7) +
((lineDPos[0] * gains[3])>>7); //16-bit
g = ((lineAPos[1] * gains[0])>>7) +
((lineBPos[1] * gains[1])>>7) +
((lineCPos[1] * gains[2])>>7) +
((lineDPos[1] * gains[3])>>7); //16-bit
r = ((lineAPos[2] * gains[0])>>7) +
((lineBPos[2] * gains[1])>>7) +
((lineCPos[2] * gains[2])>>7) +
((lineDPos[2] * gains[3])>>7); //16-bit
if(r<0) r = 0; if(r>65535) r = 65535;
if(g<0) g = 0; if(g>65535) g = 65535;
if(b<0) b = 0; if(b>65535) b = 65535;
lineAPos+=3;
lineBPos+=3;
lineCPos+=3;
lineDPos+=3;
outlinePos8[0] = b >> 8; //b
outlinePos8[1] = g >> 8; //g
outlinePos8[2] = r >> 8; //r
outlinePos8+=3;
/* SSE2 can't load byte aligned
lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3;
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3;
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3;
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3;
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_srli_epi16(o128,4);
outlinePos8[0] = _mm_extract_epi16(o128, 0); //b
outlinePos8[1] = _mm_extract_epi16(o128, 1); //g
outlinePos8[2] = _mm_extract_epi16(o128, 2); //r
outlinePos8+=3;
*/
}
break;
}
}
else
{
// Every tap fell outside the image: blank the row.
if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
{
// BUG(review): memset converts its value to unsigned char, so
// 0x10801080 degenerates to 0x80 -- the row is filled with 0x80
// bytes rather than the intended YUYV black (Y=0x10, U/V=0x80).
memset(dst, 0x10801080, widthbytes);
}
else
{
memset(dst, 0, widthbytes);
}
}
yposf += ystepf;
dst += pitch;
}
}
}
/*
 * RGB48VerticalShift
 *
 * Shifts the image vertically by 'offset' (a fraction of the height),
 * in place, using a 4-tap polyphase filter.  The fractional part of the
 * shift selects a row of tap weights from gains[tablepos][0..3]
 * (SUBPIXEL sub-positions); the integer part selects the source rows.
 * 'buffer' holds a rotating cache of the four source rows currently
 * needed, so the in-place writes never clobber unread input.
 *
 * A negative offset is handled by processing the image mirrored
 * (bottom-up addressing, 'neg' path) with the tap weights reversed.
 *
 * decoder    - supplies StereoBufferFormat, selecting the pixel layout
 * RGB48      - image base pointer (source and destination)
 * buffer     - scratch area for four cached rows (step-aligned width)
 * widthbytes - bytes per row of active pixels
 * height     - rows in the image
 * pitch      - bytes between rows (halved to 'spitch' for short pointers)
 * offset     - vertical shift as a fraction of the height
 */
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset)
{
float yposf,remainf;
int yposi,tablepos,x,y;
int gainA,gainB,gainC,gainD;
//int endofSSEline = 0;
unsigned short *scanline[4], *tline;
int spitch = pitch/2;
int neg = 0,shift = 0,skip,step;
int origwidthbytes = widthbytes;
int origwidthextra;
__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
__m128i *lineA, *lineB, *lineC, *lineD, *outline128;
// offset = -offset;
if(offset < 0.0)
neg = 1;
yposf = height * offset;
yposi = (int)floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (int)(remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
// -3 , 0 best small notch at zero?
//
// Tap weights come from the gains[][] table; for the mirrored (neg)
// direction the weights are applied in reverse order.
if(neg)
{
yposi -= 2;
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
}
else
{
yposi -= 1; //offset inherent in the table
gainD = gains[tablepos][0];
gainC = gains[tablepos][1];
gainB = gains[tablepos][2];
gainA = gains[tablepos][3];
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
// skip = bytes per pixel for the format; step = bytes consumed per loop
// iteration (two 16-byte SSE chunks for 16-bit formats, one for 8-bit).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
skip = 4;
step = 16;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
step = 16;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
step = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
default:
skip = 6;
step = 32;
break;
}
// scanline[0] = buffer;
// scanline[1] = buffer + width*skip/2;
// scanline[2] = buffer + width*skip/2*2;
// scanline[3] = buffer + width*skip/2*3;
// Round the working width up to a whole number of SSE steps;
// origwidthextra = real bytes remaining inside the final partial step.
widthbytes += (step - 1);
widthbytes -= (widthbytes % step);
origwidthextra = (origwidthbytes % step);
scanline[0] = buffer;
scanline[1] = buffer + widthbytes/2;
scanline[2] = buffer + widthbytes/2*2;
scanline[3] = buffer + widthbytes/2*3;
// Prime the 4-row cache; rows outside the image contribute zeros.
for(y=0; y<4; y++)
{
if(yposi+y >=0 && yposi+y<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-yposi-y)*spitch;
else
ptr += (yposi+y)*spitch;
memcpy(scanline[y], ptr, origwidthbytes);
}
else
{
memset(scanline[y], 0, origwidthbytes);
}
}
{
for(y=0;y<height; y++)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-y-1)*spitch;
else
ptr += y*spitch;
outline128 = (__m128i *)ptr;
lineA = (__m128i *)scanline[0];
lineB = (__m128i *)scanline[1];
lineC = (__m128i *)scanline[2];
lineD = (__m128i *)scanline[3];
//for(x=0;x<width*skip/2; x+=step)
// Each iteration filters two 16-byte halves; for 8-bit formats the
// halves are re-packed into one byte store at the end.
for(x=0;x<widthbytes; x+=step)
{
__m128i half;
// First half: load four source rows, normalizing to the filter's
// working precision ('shift' records which saturation path to use).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
// 8-bit: widen the high 8 bytes into 16-bit lanes (low half
// is handled by the second pass below, pointers not advanced yet).
lA = _mm_loadu_si128(lineA);
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB);
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC);
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD);
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
// Weighted sum of the four rows (mulhi keeps products in 16 bits).
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// adds/subs pair is a saturation idiom clamping to the working range.
if(shift)
{
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip == 6) //RGB48 || WP13
{
if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
_mm_storeu_si128(outline128++, o128);
else
{
//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
// Partial tail: bounce through scanline[0] and copy only the
// real bytes.  Clobbering scanline[0]'s head is safe: this only
// happens at row end, and that buffer is rotated to scanline[3]
// and fully rewritten before it is read again -- TODO confirm.
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
outline128++;
}
}
else
{
half = o128;
}
// Second half: same filter on the next 16 bytes (or the low 8
// bytes for the 8-bit formats, whose pointers now advance).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
lA = _mm_loadu_si128(lineA++);
lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB++);
lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC++);
lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD++);
lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
if(shift)
{
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip != 6) //!RGB48 || !WP13
{
// Re-pack the two 16-bit halves back into bytes.
half = _mm_srli_epi16(half,8);
o128 = _mm_srli_epi16(o128,8);
o128 = _mm_packus_epi16(o128, half);
}
if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
{
_mm_storeu_si128(outline128++, o128);
}
else
{
//if(x+16 < origwidthbytes+16)
if(origwidthextra > 16)
{
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
}
outline128++;
}
}
// Rotate the 4-row cache down one row and refill the newest slot.
tline = scanline[0];
scanline[0] = scanline[1];
scanline[1] = scanline[2];
scanline[2] = scanline[3];
scanline[3] = tline;
if(yposi+y+4 >=0 && yposi+y+4<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-(yposi+y+4))*spitch;
else
ptr += (yposi+y+4)*spitch;
memcpy(scanline[3], ptr, origwidthbytes);
}
else
{
memset(scanline[3], 0, origwidthbytes);
}
}
}
}
/*
 * RGB48HoriShiftZoom
 *
 * Horizontally shifts/zooms one scanline of 3-component (RGB-style)
 * 16-bit pixels in place, with optional mirroring (flip), per-eye tilt
 * compensation (frameTilt) and a non-uniform "dynamic" stretch driven
 * by cfhddata.FrameHDynamic / FrameHDynCenter / FrameHDynWidth.
 * Resampling uses a 4-tap Lanczos filter (64 sub-pixel phases).  An MMX
 * fast path handles interior pixels; a scalar path handles the edges.
 *
 * decoder   - supplies StereoBufferFormat and the FrameHDyn* parameters
 * RGB48     - scanline base pointer (source and destination)
 * buffer    - scratch copy of the scanline (filter source)
 * width     - pixels in the scanline
 * height    - image height (used to vary the rotation offset per line)
 * line      - current row index (for the roffset ramp)
 * hoffset   - horizontal shift as a fraction of the width
 * roffset   - rotation-style offset that varies linearly with 'line'
 * zoom      - magnification factor (adjusted per eye by frameTilt)
 * flip      - nonzero: mirror the line left<->right first
 * frameTilt - per-eye zoom skew; eye - which eye (sign selects * or /)
 */
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,xstepf;
int x;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
short *sscanline = (short *)buffer;
int neg = 0;   // NOTE(review): set below but never read afterwards
float offset = hoffset;
if(flip)
{
// Mirror the line by swapping 3-short pixel groups end-to-end.
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6;
}
}
// Tilt skews the zoom in opposite directions for the two eyes.
if(eye > 0)
{
zoom *= 1.0f + frameTilt;
}
else
{
zoom /= 1.0f + frameTilt;
}
// Center the zoom window, apply the shift, then the per-line rotation ramp.
xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
xposf -= width * roffset * 0.5f / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1;
xstepf = 1.0f/zoom;
// Snapshot the source line; RGB48 is overwritten in place below.
memcpy(scanline, RGB48, width*3*2);
{
//unsigned short zeroline[3] = {0};
int xx = 0;
// 16.16 fixed-point position/step for the uniform (z == 0) case.
int ixpos = (int)(xposf * 65536.0f);
int ixstep = (int)(xstepf * 65536.0f);
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
// int holdstart = width*5/10; // Use to specify a area of uniform stretch
// int holdend = width*5/10;
// [holdstart, holdend] is the region of uniform (flat) stretch; outside
// it the step ramps so that the average step over the line is preserved.
int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
float flatxstep;
float modified_xstep_avg;
float bottomxstep;
float basexstepstart;
float basexstepend;
float range;
#if MMXSUPPORTED //TODO DANREMOVE
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif
if(holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
if(holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
range = (float)(holdend - holdstart);
flatxstep = xstep-z*0.5f*xstep;
// Average step required outside the hold region so the whole line still
// advances by xstep*width; the start/end ramps are derived from it.
modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
if(holdstart == (width-holdend))
{
basexstepstart = bottomxstep;
basexstepend = bottomxstep;
}
else if(holdstart < (width-holdend))
{
float a = (float)holdstart / (float)(width-holdend);
float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
else
{
float a = (float)(width-holdend) / (float)holdstart;
float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
// WP13 holds signed 13-bit data, so it uses the signed scanline view and
// a <<1 output shift; all other formats take the unsigned path below.
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
// Non-uniform stretch: ramp the step toward the hold region.
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
// rmdr indexes the 64-phase Lanczos table for the fractional position.
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
// Fast path: the 64-bit store writes 4 shorts, so it must stay clear
// of both line ends (hence the xx < (width-1)*3 guard).
// NOTE(review): _mm_empty() is commented out at the end of this
// function; confirm no x87 float code runs after this on 32-bit builds.
if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
// adds/subs pair saturates to the 14-bit range before the <<1.
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
// Scalar edge path: weights of taps that fall outside the line are
// folded into the next in-range tap ('gains' carries them forward).
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
// Unsigned 16-bit formats: identical flow, but sources are pre-shifted
// >>1 to 15 bits in the MMX path and the output shift is <<2.
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
#if MMXSUPPORTED //TODO DANREMOVE
//_mm_empty();
#endif
}
#if 0 //Why is this not used?
void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,remainf,xstepf;
int xposi,tablepos,x;
int Ra,Rb,Rc,Rd;
int Ga,Gb,Gc,Gd;
int Ba,Bb,Bc,Bd;
int gainA,gainB,gainC,gainD;
int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
short *sscanline = (short *)buffer;
int neg = 0,shift = 0;
float offset = hoffset;
__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
__m128i *line128, *outline128;
if(flip)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6;
}
}
if(eye > 0)
{
zoom *= 1.0 + frameTilt;
}
else
{
zoom /= 1.0 + frameTilt;
}
xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset);
xposf -= width * roffset * 0.5 / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1;
xstepf = 1.0/zoom;
memcpy(scanline, RGB48, width*3*2);
{
unsigned short zeroline[3] = {0};
int xx = 0;
int ixpos = xposf * 65536.0;
int ixstep = xstepf * 65536.0;
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0;
int holdstart = width*5/10; // Use to specify a area of uniform stretch
int holdend = width*5/10;
float flatxstep = xstep-z*0.5*xstep;
float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
if(bottomxstep < 0.0)
{
bottomxstep = 0.0;
flatxstep = modified_xstep_avg + modified_xstep_avg;
}
if(flatxstep < 0.0)
{
flatxstep = 0.0;
bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * sscanline[(xp-1)*3]);
g += (gains * sscanline[(xp-1)*3+1]);
b += (gains * sscanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * scanline[(xp-1)*3]);
g += (gains * scanline[(xp-1)*3+1]);
b += (gains * scanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
/*
memcpy(scanline, RGB48, width*3*2);
{
for(x=0;x<width*3; x+=3) //RGB
{
int r,g,b,xp = ((int)xposf)*3;
xposf += xstepf;
if(xp<0 || xp>= width*3)
{
RGB48[x] = 0;
RGB48[x+1] = 0;
RGB48[x+2] = 0;
}
else
{
r = scanline[xp];
g = scanline[xp+1];
b = scanline[xp+2];
RGB48[x] = r;
RGB48[x+1] = g;
RGB48[x+2] = b;
}
}
}
*/
//_mm_empty();
}
#endif
// RGBA64HoriShiftZoom
// Horizontally shifts and zooms one RGBA64 scanline (4 components x 16-bit per
// pixel) in place, with an optional mirror (flip) and a per-eye "frame tilt"
// zoom adjustment, resampling with a 4-tap Lanczos subpixel filter.
//
//   decoder   - decoder state; StereoBufferFormat selects signed 13-bit
//               (W13A) math vs unsigned 16-bit math.
//   RGB48     - scanline modified in place (width*4 unsigned shorts).
//   buffer    - scratch buffer; must hold at least one full scanline.
//   width     - pixels per line; height/line feed the rolling-offset ramp.
//   hoffset   - horizontal shift as a fraction of the line width.
//   roffset   - rolling (per-line) offset component.
//   zoom      - horizontal zoom factor; adjusted by frameTilt per eye.
//   flip      - nonzero: mirror the line end-for-end before resampling.
//   frameTilt - per-eye zoom trim (right eye multiplies, left divides).
//   eye       - >0 selects the right-eye tilt direction.
//
// NOTE(review): unlike the RGB48 variant, the roffset term here is NOT
// divided by zoom (`xposf -= width * roffset * 0.5f;`) -- confirm whether
// that difference is intentional.
void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
    float xposf, xstepf;
    int x;
    unsigned short *scanline = (unsigned short *)buffer;
    short *sscanline = (short *)buffer;     // signed view of the same scratch line (W13A path)
    float offset = hoffset;

    if (flip)
    {
        // Mirror the scanline: swap whole RGBA pixels from both ends inward.
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*4) - 4;              // last pixel's first component
        for (x = 0; x < width/2; x++)
        {
            int t;
            t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;    // r
            t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;    // g
            t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;    // b
            t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;    // a
            // The four swaps advanced ptrR by +4, so step back two pixels
            // (8 shorts) to land on the previous pixel.  Was "ptrR -= 4",
            // which left ptrR stuck on the last pixel every iteration.
            ptrR -= 8;
        }
    }

    // Per-eye keystone trim: the right eye zooms in, the left zooms out.
    if (eye > 0)
    {
        zoom *= 1.0f + frameTilt;
    }
    else
    {
        zoom /= 1.0f + frameTilt;
    }

    // Source start position (in pixels) for the zoomed window, plus the
    // rolling offset, ramped across the frame height.
    xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
    xposf -= width * roffset * 0.5f;
    xposf += line * (width* roffset / ((float)height*zoom));
    xstepf = 1.0f/zoom;                     // source step per destination pixel

    // Work from a copy so the in-place writes below never read stale data.
    memcpy(scanline, RGB48, width*4*2);
    {
        int xx = 0;                         // destination component index
        int ixpos = (int)(xposf * 65536.0f);    // 16.16 fixed-point position
        int ixstep = (int)(xstepf * 65536.0f);  // 16.16 fixed-point step
        float xbase = xposf / (float)width;
        float xstep = xstepf / (float)width;
        // z: strength of the nonuniform ("dynamic") horizontal stretch.
        float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
        int holdstart = width*5/10;         // Use to specify a area of uniform stretch
        int holdend = width*5/10;
        float flatxstep = xstep-z*0.5f*xstep;
        float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
        float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
#if MMXSUPPORTED //TODO DANREMOVE
        // Saturating headroom trick: adds_epi16 then subs_epu16 clamps the
        // signed accumulator to the valid 14-bit range before the final shift.
        __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif
        if (bottomxstep < 0.0)
        {
            bottomxstep = 0.0;
            flatxstep = modified_xstep_avg + modified_xstep_avg;
        }
        if (flatxstep < 0.0)
        {
            flatxstep = 0.0;
            bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
        }

        if (decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
        {
            // Signed 13-bit path (W13A): data is read through sscanline.
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGBA
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    // Nonuniform stretch: ramp the step up to flatxstep
                    // inside [holdstart,holdend] and back down outside it.
                    if (x < holdstart)
                    {
                        fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f*(float)width);
                    rmdr = 63-((xp>>10) & 63);  // 6-bit subpixel phase -> lanczos row
                    xp >>= 16;
                }
                else
                {
                    xp = ixpos>>16;
                    rmdr = 63-((ixpos>>10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
                if (xp > 4 && xp < width-4)
                {
                    // Fast path: 4-tap filter on whole RGBA pixels via MMX.
                    __m64 *src64;
                    __m64 *dst64;
                    __m64 sumx16;
                    __m64 rgbx16;
                    __m64 gain16;
                    int linepos = (xp-1)*4;
                    src64 = (__m64 *)&sscanline[linepos];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
                    sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
                    src64 = (__m64 *)&sscanline[linepos+4];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    src64 = (__m64 *)&sscanline[linepos+8];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    src64 = (__m64 *)&sscanline[linepos+12];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
                    sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
                    sumx16 = _mm_slli_pi16(sumx16, 1);
                    dst64 = (__m64 *)&RGB48[xx];
                    *dst64 = sumx16;
                }
                else
#endif
                {
                    // Edge path: scalar 4-tap filter; out-of-range taps
                    // accumulate their gain onto the next in-range tap.
                    int i, r = 0, g = 0, b = 0, a = 0;
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            gains += lanczos[rmdr]>>1;
                        }
                        else
                        {
                            gains += lanczos[rmdr]>>1;
                            r += (gains * sscanline[xp*4]);
                            g += (gains * sscanline[xp*4+1]);
                            b += (gains * sscanline[xp*4+2]);
                            a += (gains * sscanline[xp*4+3]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    a >>= 14;
                    if (r < 0) r = 0; else if (r > 65535) r = 65535;
                    if (g < 0) g = 0; else if (g > 65535) g = 65535;
                    if (b < 0) b = 0; else if (b > 65535) b = 65535;
                    if (a < 0) a = 0; else if (a > 65535) a = 65535;
                    RGB48[xx] = r;
                    RGB48[xx+1] = g;
                    RGB48[xx+2] = b;
                    RGB48[xx+3] = a;
                }
                xx += 4;
            }
        }
        else
        {
            // Unsigned 16-bit path: data is read through scanline and
            // pre-shifted right 1 bit to keep the mulhi products positive.
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGBA
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    if (x < holdstart)
                    {
                        fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f*(float)width);
                    rmdr = 63-((xp>>10) & 63);
                    xp >>= 16;
                }
                else
                {
                    xp = ixpos>>16;
                    rmdr = 63-((ixpos>>10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
                if (xp > 4 && xp < width-4)
                {
                    __m64 *src64;
                    __m64 *dst64;
                    __m64 sumx16;
                    __m64 rgbx16;
                    __m64 gain16;
                    int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error.
                    src64 = (__m64 *)&scanline[linepos];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
                    rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
                    sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
                    src64 = (__m64 *)&scanline[linepos+4];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
                    rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    src64 = (__m64 *)&scanline[linepos+8];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
                    rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    src64 = (__m64 *)&scanline[linepos+12];
                    rgbx16 = *src64;
                    gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
                    rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
                    rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
                    sumx16 = _mm_adds_pi16(sumx16, rgbx16);
                    sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
                    sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
                    sumx16 = _mm_slli_pi16(sumx16, 2);
                    dst64 = (__m64 *)&RGB48[xx];
                    *dst64 = sumx16;
                }
                else
#endif
                {
                    int i, r = 0, g = 0, b = 0, a = 0;
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            gains += lanczos[rmdr]>>1;
                        }
                        else
                        {
                            gains += lanczos[rmdr]>>1;
                            r += (gains * scanline[xp*4]);
                            g += (gains * scanline[xp*4+1]);
                            b += (gains * scanline[xp*4+2]);
                            a += (gains * scanline[xp*4+3]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    a >>= 14;
                    if (r < 0) r = 0; else if (r > 65535) r = 65535;
                    if (g < 0) g = 0; else if (g > 65535) g = 65535;
                    if (b < 0) b = 0; else if (b > 65535) b = 65535;
                    if (a < 0) a = 0; else if (a > 65535) a = 65535;
                    RGB48[xx] = r;
                    RGB48[xx+1] = g;
                    RGB48[xx+2] = b;
                    RGB48[xx+3] = a;
                }
                xx += 4;
            }
        }
    }
#if MMXSUPPORTED //TODO DANREMOVE
    //_mm_empty();
#endif
}
// RGB48WindowMask
// Blanks a "floating window" strip at one edge of a scanline: the first (or
// last) N pixels are zeroed and the edge pixel is attenuated by the fractional
// remainder, so the mask edge moves smoothly.  A negative windowMask selects
// the opposite edge.  Pixel layout (3 vs 4 components) and signed vs unsigned
// math are chosen from decoder->StereoBufferFormat.
void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask)
{
    float maskwidth = (float)width * fabsf(windowMask);     // mask extent in pixels
    int blanked = (int)maskwidth;                           // whole pixels fully zeroed
    float frac = maskwidth - (float)blanked;                // partial coverage of edge pixel
    double keep = 1.0 - frac;                               // attenuation for the edge pixel
    int comps = 3;                                          // components per pixel
    int side = channel;
    int c;

    // 4-component formats store 8 bytes per pixel.
    switch (decoder->StereoBufferFormat)
    {
        case DECODED_FORMAT_RGB32:
        case DECODED_FORMAT_W13A:
        case DECODED_FORMAT_RG64:
            comps = 4;
            break;
    }

    // A negative mask flips which eye's edge is blanked.
    if (windowMask < 0)
        side = (side == 0) ? 1 : 0;

    if (decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
        decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
    {
        short *ptr = (short *)RGB48;
        if (side == 0)
        {
            // Blank from the left edge, then fade the first surviving pixel.
            memset(ptr, 0, comps*2*blanked);
            ptr += blanked*comps;
            for (c = 0; c < comps; c++)
                ptr[c] = (int)((float)ptr[c] * keep);
        }
        else
        {
            // Blank from the right edge, then fade the last surviving pixel.
            ptr += (width - blanked)*comps;
            memset(ptr, 0, comps*2*blanked);
            for (c = 1; c <= comps; c++)
                ptr[-c] = (int)((float)ptr[-c] * keep);
        }
    }
    else
    {
        unsigned short *ptr = RGB48;
        if (side == 0)
        {
            memset(ptr, 0, comps*2*blanked);
            ptr += blanked*comps;
            for (c = 0; c < comps; c++)
                ptr[c] = (int)((float)ptr[c] * keep);
        }
        else
        {
            ptr += (width - blanked)*comps;
            memset(ptr, 0, comps*2*blanked);
            for (c = 1; c <= comps; c++)
                ptr[-c] = (int)((float)ptr[-c] * keep);
        }
    }
}
// RGB48HoriShift
// Shifts one RGB48 scanline (3 components x 16-bit per pixel) horizontally by
// `offset` (a fraction of the line width), with 4-tap subpixel interpolation
// from the `gains` table, and an optional end-for-end mirror (flip).
// The line is first staged into `buffer` with zero padding (plus edge guard
// pixels), then filtered back into RGB48 with SSE2, 8 shorts per iteration.
// WP13 data is processed as signed 13-bit; other formats are pre-shifted
// right 3 bits to 13-bit unsigned and restored with a 4-bit left shift.
// NOTE(review): the staging writes up to ~18 pixels beyond `width` into
// `buffer`, and the SSE loop rounds width*3 up to a multiple of 8 shorts when
// storing into RGB48 -- assumes both buffers carry padding; confirm at callers.
void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
float xposf,remainf;
int xposi,tablepos,x;
int gainA,gainB,gainC,gainD;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
int neg = 0,shift = 0;
__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
__m128i *line128, *outline128;
// Optional mirror: swap whole RGB pixels from both ends inward.
if(flip)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t1,t2,t3;
t1 = ptrL[0];
ptrL[0] = ptrR[0];
ptrR[0] = t1;
t2 = ptrL[1];
ptrL[1] = ptrR[1];
ptrR[1] = t2;
t3 = ptrL[2];
ptrL[2] = ptrR[2];
ptrR[2] = t3;
ptrL += 3;
ptrR -= 3;
}
}
if(offset < 0.0)
neg = 1;
// Split the shift into whole pixels (xposi) and a subpixel phase that
// selects one of SUBPIXEL filter-tap rows in the gains table.
xposf = width * offset;
xposi = (int)floorf(xposf);
remainf = xposf - (float)xposi;
tablepos = (int)(remainf*(float)SUBPIXEL);
xposi = abs(xposi);
if(xposi==0 && tablepos == 0)
return; // no move required
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
if(neg == 0)
{
// Positive shift: stage xposi+2 black pixels, then the image (clipped
// to width), then 16 black guard pixels for the filter taps.
unsigned short *ptr = scanline;
int nwidth = width-xposi+16;
if(nwidth > width)
nwidth = width;
for(x=0;x<xposi+2;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
memcpy(ptr, RGB48, (nwidth)*3*2);
ptr += (nwidth)*3;
for(x=0;x<16;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
else
{
// Negative shift: stage 2 lead-in pixels (from the image where
// available), the image from xposi onward, then black padding.
unsigned short *ptr = scanline;
for(x=0;x<2;x++)
{
if(x+xposi-2>=0)
{
*ptr++ = RGB48[(x+xposi-2)*3];//r
*ptr++ = RGB48[(x+xposi-2)*3+1];//g
*ptr++ = RGB48[(x+xposi-2)*3+2];//b
}
else
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
ptr += (width-xposi)*3;
for(x=0;x<xposi+16;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
line128 = (__m128i *)&scanline[0];
//outline128 = line128;
outline128 = (__m128i *)&RGB48[0];
//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
// WP13 is already signed 13-bit: no pre-shift, restore with <<1.
l1 = _mm_loadu_si128(line128++);
l2 = _mm_loadu_si128(line128++);
l3 = _mm_loadu_si128(line128++);
shift = 0;
}
else
{
// Unsigned 16-bit data: pre-shift to 13-bit so mulhi stays positive.
l1 = _mm_loadu_si128(line128++);
l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
l2 = _mm_loadu_si128(line128++);
l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
l3 = _mm_loadu_si128(line128++);
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
shift = 3;
}
// Main 4-tap filter: each iteration produces 8 output shorts.  The byte
// shuffles below slide the 3-short pixel stride through the l1/l2/l3
// window so each tap sees the source advanced by one pixel.
for(x=0;x<width*3; x+=8)
{
//o=l1* gainA
o128 = _mm_mulhi_epi16(l1, gA);
//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
//t1 *= gainB
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_slli_si128(l2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gB);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,3*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,6*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
t2 = _mm_slli_si128(l3,7*2);
t1 = _mm_adds_epi16(t1,t2);
t1 = _mm_mulhi_epi16(t1, gD);
o128 = _mm_adds_epi16(o128,t1);
// Slide the 3-register window forward by 8 shorts.
l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if(shift)
{
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
// Saturating clamp to 12-bit range, then restore 16-bit scale.
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
_mm_storeu_si128(outline128++, o128);
}
}
// RGBA64HoriShift
// Shifts one RGBA64 scanline (4 components x 16-bit per pixel) horizontally by
// `offset` (a fraction of the line width), with 4-tap subpixel interpolation
// from the `gains` table, and an optional end-for-end mirror (flip).
// The line is staged into `buffer` with zero padding, then filtered back into
// RGB48 with SSE2, 8 shorts (2 pixels) per iteration.  WP13/W13A data is
// processed as signed 13-bit; other formats are pre-shifted right 3 bits.
void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
    float xposf, remainf;
    int xposi, tablepos, x;
    int gainA, gainB, gainC, gainD;
    unsigned short *scanline = (unsigned short *)buffer;
    int neg = 0, shift = 0;
    __m128i l1, l2, l3, gA, gB, gC, gD, o128, t1, t2;
    __m128i *line128, *outline128;

    if (flip)
    {
        // Mirror the scanline: swap whole RGBA pixels from both ends inward.
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*4) - 4;
        for (x = 0; x < width/2; x++)
        {
            int t1, t2, t3, t4;
            t1 = ptrL[0];
            ptrL[0] = ptrR[0];
            ptrR[0] = t1;
            t2 = ptrL[1];
            ptrL[1] = ptrR[1];
            ptrR[1] = t2;
            t3 = ptrL[2];
            ptrL[2] = ptrR[2];
            ptrR[2] = t3;
            t4 = ptrL[3];   // was ptrL[2]: the alpha swap wrote blue into alpha
            ptrL[3] = ptrR[3];
            ptrR[3] = t4;
            ptrL += 4;
            ptrR -= 4;
        }
    }

    if (offset < 0.0)
        neg = 1;

    // Split the shift into whole pixels (xposi) and a subpixel phase that
    // selects one of SUBPIXEL filter-tap rows in the gains table.
    xposf = width * offset;
    xposi = (int)floorf(xposf);
    remainf = xposf - (float)xposi;
    tablepos = (int)(remainf*(float)SUBPIXEL);
    xposi = abs(xposi);
    if (xposi == 0 && tablepos == 0)
        return; // no move required

    gainA = gains[tablepos][0];
    gainB = gains[tablepos][1];
    gainC = gains[tablepos][2];
    gainD = gains[tablepos][3];

    if (neg == 0)
    {
        // Positive shift: stage xposi+2 black pixels, then the image
        // (clipped to width), then 16 black guard pixels for the taps.
        unsigned short *ptr = scanline;
        int nwidth = width-xposi+16;
        if (nwidth > width)
            nwidth = width;
        for (x = 0; x < xposi+2; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
        memcpy(ptr, RGB48, (nwidth)*4*2);
        ptr += (nwidth)*4;
        for (x = 0; x < 16; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }
    else
    {
        // Negative shift: stage 2 lead-in pixels (from the image where
        // available), the image from xposi onward, then black padding.
        unsigned short *ptr = scanline;
        for (x = 0; x < 2; x++)
        {
            if (x+xposi-2 >= 0)
            {
                *ptr++ = RGB48[(x+xposi-2)*4];//r
                *ptr++ = RGB48[(x+xposi-2)*4+1];//g
                *ptr++ = RGB48[(x+xposi-2)*4+2];//b
                *ptr++ = RGB48[(x+xposi-2)*4+3];//a
            }
            else
            {
                *ptr++ = 0;//r
                *ptr++ = 0;//g
                *ptr++ = 0;//b
                *ptr++ = 0;//a
            }
        }
        memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2);
        ptr += (width-xposi)*4;
        for (x = 0; x < xposi+16; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }

    gA = _mm_set1_epi16(gainA);
    gB = _mm_set1_epi16(gainB);
    gC = _mm_set1_epi16(gainC);
    gD = _mm_set1_epi16(gainD);
    line128 = (__m128i *)&scanline[0];
    outline128 = (__m128i *)&RGB48[0];
    //l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2,
    //l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4,
    //l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6,
    if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
    {
        // Already signed 13-bit: no pre-shift, restore with <<1 below.
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        // Unsigned 16-bit data: pre-shift to 13-bit so mulhi stays positive.
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
        shift = 3;
    }

    // Main 4-tap filter: each iteration produces 8 output shorts (2 pixels).
    // The 4-short pixel stride divides the register width evenly, so each
    // tap is a simple 8-byte shuffle of the l1/l2/l3 window.
    for (x = 0; x < width*4; x += 8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA);
        //t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0
        //t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4
        //t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4
        //l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_slli_si128(l2,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB);
        o128 = _mm_adds_epi16(o128,t1);
        //t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0
        //t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0
        //t2 >>= 4*16;  //t2 = 0 0 0 0 r4,g4,b4,a4
        //t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4
        //l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_srli_si128(l2,4*2);
        t2 = _mm_slli_si128(t2,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC);
        o128 = _mm_adds_epi16(o128,t1);
        //t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0
        //t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5
        //t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1,4*2);
        t2 = _mm_slli_si128(l3,4*2);
        t1 = _mm_adds_epi16(t1,t2);
        t1 = _mm_mulhi_epi16(t1, gD);
        o128 = _mm_adds_epi16(o128,t1);

        // Slide the 3-register window forward by 8 shorts.
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);
        if (shift)
        {
            l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
            // Saturating clamp to 12-bit range, then restore 16-bit scale.
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128,4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128,1);
        }
        _mm_storeu_si128(outline128++, o128);
    }
}
// RGB48HoriShiftAnaglyph
// Shifts the R, G and B planes of one RGB48 scanline horizontally by
// independent offsets (each a fraction of the line width), with independent
// mirroring per plane -- used to build anaglyph stereo views.  Each channel is
// staged into its own component slots of `buffer`, then all three are
// filtered back into RGB48 in a single SSE2 pass that rotates three
// pre-interleaved gain vectors (the 3-short pixel stride repeats every
// 3 registers of 8 shorts = 24 shorts).
void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width,
                            float offsetR, float offsetG, float offsetB,
                            int flipR, int flipG, int flipB)
{
    float Rxposf, Rremainf;
    int Rxposi, Rtablepos;
    float Gxposf, Gremainf;
    int Gxposi, Gtablepos;
    float Bxposf, Bremainf;
    int Bxposi, Btablepos;
    int x;
    int RgainA, RgainB, RgainC, RgainD;
    int GgainA, GgainB, GgainC, GgainD;
    int BgainA, BgainB, BgainC, BgainD;
    unsigned short *scanline = (unsigned short *)buffer;
    int negR = 0;
    int negG = 0;
    int negB = 0;
    int shift = 0;
    __m128i l1, l2, l3, o128, t1, t2;
    __m128i *line128, *outline128;
    __m128i gA1, gB1, gC1, gD1, gA2, gB2, gC2, gD2, gA3, gB3, gC3, gD3;

    // Per-channel mirror: swap only that component, pixel stride 3.
    if (flipR)
    {
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width*3) - 3;
        for (x = 0; x < width/2; x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }
    if (flipG)
    {
        unsigned short *ptrL = &RGB48[1];
        unsigned short *ptrR = &RGB48[1];
        ptrR += (width*3) - 3;
        for (x = 0; x < width/2; x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }
    if (flipB)
    {
        unsigned short *ptrL = &RGB48[2];
        unsigned short *ptrR = &RGB48[2];
        ptrR += (width*3) - 3;
        for (x = 0; x < width/2; x++)
        {
            int t;
            t = *ptrL;
            *ptrL = *ptrR;
            *ptrR = t;
            ptrL += 3;
            ptrR -= 3;
        }
    }

    if (offsetR < 0.0)
        negR = 1;
    if (offsetG < 0.0)
        negG = 1;
    if (offsetB < 0.0)
        negB = 1;

    // Split each channel's shift into whole pixels and a subpixel phase
    // (a SUBPIXEL-row index into the gains filter table).
    Rxposf = width * offsetR;
    Rxposi = (int)floorf(Rxposf);
    Rremainf = Rxposf - (float)Rxposi;
    Rtablepos = (int)(Rremainf*(float)SUBPIXEL);
    Gxposf = width * offsetG;
    Gxposi = (int)floorf(Gxposf);
    Gremainf = Gxposf - (float)Gxposi;
    Gtablepos = (int)(Gremainf*(float)SUBPIXEL);
    Bxposf = width * offsetB;
    Bxposi = (int)floorf(Bxposf);
    Bremainf = Bxposf - (float)Bxposi;
    Btablepos = (int)(Bremainf*(float)SUBPIXEL);
    Rxposi = abs(Rxposi);
    Gxposi = abs(Gxposi);
    Bxposi = abs(Bxposi);

    // Skip only when NO channel needs to move.  Previously this tested the
    // red channel alone, so nonzero green/blue offsets were silently dropped
    // whenever red happened to be unshifted.
    if (Rxposi == 0 && Rtablepos == 0 &&
        Gxposi == 0 && Gtablepos == 0 &&
        Bxposi == 0 && Btablepos == 0)
        return; // no move required on any channel

    RgainA = gains[Rtablepos][0];
    RgainB = gains[Rtablepos][1];
    RgainC = gains[Rtablepos][2];
    RgainD = gains[Rtablepos][3];
    GgainA = gains[Gtablepos][0];
    GgainB = gains[Gtablepos][1];
    GgainC = gains[Gtablepos][2];
    GgainD = gains[Gtablepos][3];
    BgainA = gains[Btablepos][0];
    BgainB = gains[Btablepos][1];
    BgainC = gains[Btablepos][2];
    BgainD = gains[Btablepos][3];

    // Stage the RED component into scanline (g/b slots skipped).
    if (negR == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Rxposi+16;
        if (nwidth > width)
            nwidth = width;
        for (x = 0; x < Rxposi+2; x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
        for (x = 0; x < nwidth; x++)
        {
            *ptr++ = RGB48[x*3];//r
            ptr++;//g
            ptr++;//b
        }
        for (x = 0; x < 16; x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for (x = 0; x < 2; x++)
        {
            if (x+Rxposi-2 >= 0)
            {
                *ptr++ = RGB48[(x+Rxposi-2)*3];//r
                ptr++;//g
                ptr++;//b
            }
            else
            {
                *ptr++ = 0;//r
                ptr++;//g
                ptr++;//b
            }
        }
        for (x = Rxposi; x < width; x++)
        {
            *ptr++ = RGB48[x*3];//r
            ptr++;//g
            ptr++;//b
        }
        for (x = 0; x < Rxposi+16; x++)
        {
            *ptr++ = 0;//r
            ptr++;//g
            ptr++;//b
        }
    }

    // Stage the GREEN component into scanline (r/b slots skipped).
    if (negG == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Gxposi+16;
        if (nwidth > width)
            nwidth = width;
        for (x = 0; x < Gxposi+2; x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
        for (x = 0; x < nwidth; x++)
        {
            ptr++;//r
            *ptr++ = RGB48[x*3+1];//g
            ptr++;//b
        }
        for (x = 0; x < 16; x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for (x = 0; x < 2; x++)
        {
            if (x+Gxposi-2 >= 0)
            {
                ptr++;//r
                *ptr++ = RGB48[(x+Gxposi-2)*3+1];//g
                ptr++;//b
            }
            else
            {
                ptr++;//r
                *ptr++ = 0;//g
                ptr++;//b
            }
        }
        for (x = Gxposi; x < width; x++)
        {
            ptr++;//r
            *ptr++ = RGB48[x*3+1];//g
            ptr++;//b
        }
        for (x = 0; x < Gxposi+16; x++)
        {
            ptr++;//r
            *ptr++ = 0;//g
            ptr++;//b
        }
    }

    // Stage the BLUE component into scanline (r/g slots skipped).
    if (negB == 0)
    {
        unsigned short *ptr = scanline;
        int nwidth = width-Bxposi+16;
        if (nwidth > width)
            nwidth = width;
        for (x = 0; x < Bxposi+2; x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
        for (x = 0; x < nwidth; x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = RGB48[x*3+2];//b
        }
        for (x = 0; x < 16; x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
    }
    else
    {
        unsigned short *ptr = scanline;
        for (x = 0; x < 2; x++)
        {
            if (x+Bxposi-2 >= 0)
            {
                ptr++;//r
                ptr++;//g
                *ptr++ = RGB48[(x+Bxposi-2)*3+2];//b
            }
            else
            {
                ptr++;//r
                ptr++;//g
                *ptr++ = 0;//b
            }
        }
        for (x = Bxposi; x < width; x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = RGB48[x*3+2];//b
        }
        for (x = 0; x < Bxposi+16; x++)
        {
            ptr++;//r
            ptr++;//g
            *ptr++ = 0;//b
        }
    }

    // Three phase-shifted gain interleavings: an 8-short register holds
    // 2 2/3 RGB pixels, so the channel pattern repeats every 3 registers.
    // The loop rotates gX1 -> gX2 -> gX3 -> gX1 to track that phase.
    gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA);
    gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA);
    gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA);
    gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB);
    gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB);
    gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB);
    gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC);
    gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC);
    gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC);
    gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD);
    gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD);
    gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD);

    line128 = (__m128i *)&scanline[0];
    outline128 = (__m128i *)&RGB48[0];
    //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
    //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
    //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
    if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
    {
        // Already signed 13-bit: no pre-shift, restore with <<1 below.
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        // Unsigned 16-bit data: pre-shift to 13-bit so mulhi stays positive.
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
        shift = 3;
    }

    // Main 4-tap filter, 8 output shorts per iteration; identical shuffle
    // pattern to RGB48HoriShift but with per-channel (rotating) gains.
    for (x = 0; x < width*3; x += 8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA1);
        //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
        //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
        //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
        //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_slli_si128(l2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB1);
        o128 = _mm_adds_epi16(o128,t1);
        //t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
        //t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
        //t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
        //t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
        //l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,3*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC1);
        o128 = _mm_adds_epi16(o128,t1);
        //t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
        //t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
        //t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
        //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
        //t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
        //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1,3*2);
        t2 = _mm_srli_si128(l2,6*2);
        t2 = _mm_slli_si128(t2,5*2);
        t1 = _mm_adds_epi16(t1,t2);
        t2 = _mm_slli_si128(l3,7*2);
        t1 = _mm_adds_epi16(t1,t2);
        t1 = _mm_mulhi_epi16(t1, gD1);
        o128 = _mm_adds_epi16(o128,t1);

        // Rotate the gain-vector phase for the next 8-short register.
        t1 = gA1;
        gA1 = gA2;
        gA2 = gA3;
        gA3 = t1;
        t1 = gB1;
        gB1 = gB2;
        gB2 = gB3;
        gB3 = t1;
        t1 = gC1;
        gC1 = gC2;
        gC2 = gC3;
        gC3 = t1;
        t1 = gD1;
        gD1 = gD2;
        gD2 = gD3;
        gD3 = t1;

        // Slide the 3-register window forward by 8 shorts.
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);
        if (shift)
        {
            l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
            // Saturating clamp to 12-bit range, then restore 16-bit scale.
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128,4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128,1);
        }
        _mm_storeu_si128(outline128++, o128);
    }
}
// Accumulate one decoded row of pixels into the analysis-tools statistics:
//   tools->histR/G/B[256]       - whole-frame RGB histograms
//   tools->waveR/G/B[pos][256]  - per-column RGB waveform (one bucket per sampled column)
//   tools->scopeUV[256][256]    - vectorscope plot of the U/V (chroma) pair
// The pixel memory layout is selected by 'format'; when whitepoint == 13 the
// RGB formats are remapped to their signed 13-bit white-point equivalents
// (W13A / WP13) before dispatch.  Columns are subsampled by 'step' so that at
// most 360 waveform columns are produced.
// NOTE(review): the fixed-point RGB->UV coefficients (/8192) look like BT.709
// Cb/Cr (0.439 peaks), and the 9535/14688/17326 YUV->RGB coefficients look
// like video-range BT.709 (1.164/1.793/2.115) - presumed, confirm against the
// encoder's matrix.  'scaledvectorscope' (75%-graticule rescale) is hard-coded off.
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
    int x,val,ypos=0,upos=1,vpos=3;      // ypos/upos/vpos: byte offsets within a 4:2:2 pair (YUYV default)
    int step = 1,pos=0;                  // step: horizontal subsample factor; pos: waveform column index
    short *ssbase = (short *)sbase;      // signed 16-bit view for the signed 13-bit (WP13/W13A) formats
    uint32_t *lbase = (uint32_t *)sbase; // 32-bit view for the packed 10-bit formats
    ToolsHandle *tools = decoder->tools;
    int scaledvectorscope = 0;
    if(tools == NULL)
        return;                          // analysis tools not allocated - nothing to accumulate into
    if(whitepoint == 13)
    {
        // 13-bit white-point data: reinterpret the format as the signed 13-bit variant.
        if(format == DECODED_FORMAT_RG64)
            format = DECODED_FORMAT_W13A;
        else
            format = DECODED_FORMAT_WP13;
    }
    // Choose the smallest power-of-two subsample that keeps <= 360 waveform columns.
    while(width/step > 360)
    {
        step*=2;
    }
    tools->waveformWidth = width/step;
    decoder->tools->blurUVdone = 0;
    switch(format & 0xffffff)            // low 24 bits select the pixel format
    {
    case DECODED_FORMAT_WP13:
        // Signed 13-bit white-point RGB, 3 shorts per pixel; >>5 maps 13-bit to 8-bit.
        decoder->tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = ssbase[0]>>5;
            G = ssbase[1]>>5;
            B = ssbase[2]>>5;
            // WP13 samples may be negative or super-white - clamp to the 8-bit bins.
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            ssbase += step*3;            // advance 'step' pixels of 3 shorts each
        }
        break;
    case DECODED_FORMAT_W13A:
        // Signed 13-bit white-point RGBA, 4 shorts per pixel (alpha ignored).
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = ssbase[0]>>5;
            G = ssbase[1]>>5;
            B = ssbase[2]>>5;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            ssbase += step*4;            // 4 shorts per pixel (RGBA)
        }
        break;
    case DECODED_FORMAT_RG48:
        // Unsigned 16-bit RGB, 3 shorts per pixel; >>8 of a uint16 is already 0..255, no clamp needed.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = sbase[0]>>8;
            G = sbase[1]>>8;
            B = sbase[2]>>8;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            sbase += step*3;
        }
        break;
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_RG30:
        // Packed 10-bit-per-component RGB in a 32-bit word; take the top 8 of each
        // 10-bit field (fields at bit 20/10/0, so >>22, >>12, >>2).  '02' is octal but equals 2.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = lbase[x];
            R = (val>>22)&0xff;
            G = (val>>12)&0xff;
            B = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_AR10:
        // Same 10-bit packing as above but with R and B fields swapped.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = lbase[x];
            B = (val>>22)&0xff;
            G = (val>>12)&0xff;
            R = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_R210:
        // Big-endian packed 10-bit RGB: byte-swap to native order, then extract as for RG30.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = SwapInt32BtoN(lbase[x]);
            R = (val>>22)&0xff;
            G = (val>>12)&0xff;
            B = (val>>02)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_DPX0:
        // Big-endian DPX 10-bit packing: fields start at bit 22/12/2 from the top,
        // so the top 8 bits of each component sit at >>24, >>14, >>4.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            val = SwapInt32BtoN(lbase[x]);
            R = (val>>24)&0xff;
            G = (val>>14)&0xff;
            B = (val>>04)&0xff;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_B64A:
        // 16-bit ARGB (alpha first): RGB at indices 1..3, 4 shorts per pixel.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int32_t R,G,B,U,V;
            R = sbase[1]>>8;
            G = sbase[2]>>8;
            B = sbase[3]>>8;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
            sbase += step*4;
        }
        break;
    case COLOR_FORMAT_UYVY:
        // UYVY just reorders the bytes within the 4:2:2 pair; set the offsets
        // and reuse the YUYV loop below.
        ypos=1,upos=0,vpos=2;
        /* fallthrough */
    case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
    case COLOR_FORMAT_YUYV:
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 2;               // 2 bytes per pixel in 8-bit 4:2:2
            Y = bptr[ypos]-16;           // remove video-range offsets
            U = bptr[upos]-128;
            Y+= bptr[ypos+2]-16; Y>>=1;  // average the two lumas sharing this chroma pair
            V = bptr[vpos]-128;
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            //TODO much -20 to 120 RGB range.
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                // Rescale chroma excursions for the 75% graticule.
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            //* 255.0/314.0
            //* 255.0/244.0
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_YU64:
        // 16-bit 4:2:2; read only the high byte of each 16-bit component.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 4;               // 4 bytes per pixel (two 16-bit components)
            bptr++; //read only the high byte out of the 16-bit
            Y = bptr[0]-16;
            V = bptr[2]-128;
            Y+= bptr[4]-16; Y>>=1;       // average the luma pair
            U = bptr[6]-128;
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_V210:
        // v210: six 4:2:2 pixels packed into four 32-bit words (three 10-bit
        // fields per word).  Each sub-case below unpacks one of the six pixel
        // phases, taking the top 8 bits of each 10-bit field and averaging the
        // luma pair that shares the chroma sample.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int Y,U,V,R,G,B;
            uint32_t *lptr = (uint32_t *)sbase;
            lptr += (x/6)*4;             // start of this 6-pixel / 4-word group
            switch(x % 6)
            {
            case 0:
                V = ((*lptr>>02) & 0xff) - 128;
                Y = ((*lptr>>12) & 0xff) - 16;
                U = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            case 1:
                lptr++;
                Y = ((*lptr>>02) & 0xff) - 16;
                V = ((*lptr>>12) & 0xff) - 128;
                Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
                lptr--;
                U = ((*lptr>>22) & 0xff) - 128;
                break;
            case 2:
                lptr++;
                Y = ((*lptr>>22) & 0xff) - 16;
                lptr++;
                U = ((*lptr>>02) & 0xff) - 128;
                Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1;
                V = ((*lptr>>22) & 0xff) - 128;
                break;
            case 3:
                lptr++;
                V = ((*lptr>>12) & 0xff) - 128;
                lptr++;
                U = ((*lptr>>02) & 0xff) - 128;
                Y = ((*lptr>>12) & 0xff) - 16;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            case 4:
                lptr+=2;
                V = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                Y = ((*lptr>>02) & 0xff) - 16;
                U = ((*lptr>>12) & 0xff) - 128;
                Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
                break;
            case 5:
                lptr+=2;
                V = ((*lptr>>22) & 0xff) - 128;
                lptr++;
                U = ((*lptr>>12) & 0xff) - 128;
                Y = ((*lptr>>22) & 0xff) - 16;
                lptr++;
                Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
                break;
            }
            R = (9535*Y + 14688*V)>>13; //13-bit white
            G = (9535*Y - 4375*V - 1745*U)>>13;
            B = (9535*Y + 17326*U)>>13;
            if(R > 255) R = 255;
            if(R < 0) R = 0;
            if(G > 255) G = 255;
            if(G < 0) G = 0;
            if(B > 255) B = 255;
            if(B < 0) B = 0;
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            if(scaledvectorscope)
            {
                U *= 255; U /= 314;
                V *= 255; V /= 244;
            }
            U += 128;
            V += 128;
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_RGB24:
        // 8-bit packed RGB, stored B,G,R in memory (little-endian RGB24).
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int R,G,B,U,V;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 3;
            R = bptr[2];
            G = bptr[1];
            B = bptr[0];
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_RGB32:
        // 8-bit BGRA; alpha byte (bptr[3]) ignored.
        tools->histogram = 1;
        for(x=0,pos=0; x<width; x+=step,pos++)
        {
            int R,G,B,U,V;
            uint8_t *bptr = (uint8_t *)sbase;
            bptr += x * 4;
            R = bptr[2];
            G = bptr[1];
            B = bptr[0];
            tools->histR[R]++;
            tools->histG[G]++;
            tools->histB[B]++;
            tools->waveR[pos][R]++;
            tools->waveG[pos][G]++;
            tools->waveB[pos][B]++;
            //Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
            if(scaledvectorscope)
            {
                U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
                V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
            }
            else
            {
                U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
                V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
            }
            if(U<0) U=0; if(U>255) U=255;
            if(V<0) V=0; if(V>255) V=255;
            tools->scopeUV[U][V]++;
        }
        break;
    case COLOR_FORMAT_BYR2:
    case COLOR_FORMAT_BYR4:
        // Raw Bayer data: no per-pixel RGB available at this stage.
        //do nothing
        break;
    default:
        assert(0);                       // unhandled pixel format - debug builds trap here
#if (0 && DEBUG)
        fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
        break;
    }
}
// Reduce stereo crosstalk ("ghosting") between the left and right eye images
// of one row.  Each channel is squared (>>6 to 10-bit, then squared to 20-bit,
// approximating linear light), blended toward white by its leak amount while
// subtracting the opposite eye's contribution, then converted back via a
// lazily-filled square-root lookup table and written in place.
//   sbaseL/sbaseR : interleaved 16-bit RGB rows for left/right eyes (modified in place)
//   ileakL/ileakR : leak strengths (>>6 scales them to 10-bit, 0..1023)
// decoder->sqrttable uses 65535 as the "not yet computed" sentinel.
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
    int x,RL,GL,BL,RR,GR,BR;
    int nRL,nGL,nBL;
    int nRR,nGR,nBR;
    int max = 1024*1024-1;               // 20-bit full scale (1023^2 rounded up to 2^20-1)
    unsigned short *sqrttable = decoder->sqrttable;
    ileakL>>=6;                          // leak amounts to 10-bit
    ileakR>>=6;
    if(sqrttable == NULL)
        return;                          // sqrt LUT not allocated - leave the row untouched
    for(x=0;x<width;x++)
    {
        RL = sbaseL[0]>>6;
        GL = sbaseL[1]>>6; //10-bit
        BL = sbaseL[2]>>6;
        RL*=RL;
        GL*=GL; //20-bit
        BL*=BL;
        RR = sbaseR[0]>>6;
        GR = sbaseR[1]>>6; //10-bit
        BR = sbaseR[2]>>6;
        RR*=RR;
        GR*=GR; //20-bit
        BR*=BR;
        // new_left = left*(1-leak) + leak*white - right*leak  (in 10.20 fixed point)
        nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
        nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
        nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
        nRL >>= 10; //20-bit
        nGL >>= 10;
        nBL >>= 10;
        if(nRL>max) nRL=max; if(nRL<0) nRL=0;
        if(nGL>max) nGL=max; if(nGL<0) nGL=0;
        if(nBL>max) nBL=max; if(nBL<0) nBL=0;
        // Lazily populate the sqrt LUT (65535 marks an empty slot).
        if(sqrttable[nRL] == 65535)
            sqrttable[nRL] = (int)sqrt(nRL);
        if(sqrttable[nGL] == 65535)
            sqrttable[nGL] = (int)sqrt(nGL);
        if(sqrttable[nBL] == 65535)
            sqrttable[nBL] = (int)sqrt(nBL);
        sbaseL[0] = sqrttable[nRL]<<6;   // back to 16-bit range
        sbaseL[1] = sqrttable[nGL]<<6;
        sbaseL[2] = sqrttable[nBL]<<6;
        sbaseL += 3;
        // Same correction for the right eye, leaking against the left.
        nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
        nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
        nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
        nRR >>= 10; //20-bit
        nGR >>= 10;
        nBR >>= 10;
        if(nRR>max) nRR=max; if(nRR<0) nRR=0;
        if(nGR>max) nGR=max; if(nGR<0) nGR=0;
        if(nBR>max) nBR=max; if(nBR<0) nBR=0;
        if(sqrttable[nRR] == 65535)
            sqrttable[nRR] = (int)sqrt(nRR);
        if(sqrttable[nGR] == 65535)
            sqrttable[nGR] = (int)sqrt(nGR);
        if(sqrttable[nBR] == 65535)
            sqrttable[nBR] = (int)sqrt(nBR);
        sbaseR[0] = sqrttable[nRR]<<6;
        sbaseR[1] = sqrttable[nGR]<<6;
        sbaseR[2] = sqrttable[nBR]<<6;
        sbaseR += 3;
    }
#else // works and fast but has not image linearization, not as good
    // NOTE(review): this disabled SSE2 path references 'ileak', which is not a
    // parameter of this function - it would not compile if enabled.  It also
    // skips the square/sqrt linearization of the active path.
    __m128i *ptrL = (__m128i *)sbaseL;
    __m128i *ptrR = (__m128i *)sbaseR;
    __m128i t,L,R,nL,nR;
    int x,width8 = (width*3) & ~7;
    __m128i white_epi16 = _mm_set1_epi16(32767);
    __m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
    __m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));
    for(x=0;x<width8;x+=8)
    {
        L = _mm_load_si128(ptrL);
        R = _mm_load_si128(ptrR);
        L = _mm_srli_epi16(L,1); //15-bit
        R = _mm_srli_epi16(R,1); //15-bit
        nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
        t = _mm_mulhi_epi16(white_epi16, leak_epi16);
        nL = _mm_adds_epi16(nL, t);
        t = _mm_mulhi_epi16(R, leak_epi16);
        nL = _mm_subs_epu16(nL, t);
        nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
        t = _mm_mulhi_epi16(white_epi16, leak_epi16);
        nR = _mm_adds_epi16(nR, t);
        t = _mm_mulhi_epi16(L, leak_epi16);
        nR = _mm_subs_epu16(nR, t);
        L = _mm_slli_epi16(nL,2);
        R = _mm_slli_epi16(nR,2);
        _mm_store_si128(ptrL++, L);
        _mm_store_si128(ptrR++, R);
    }
#endif
}
// Ghost-bust for a red/cyan anaglyph packed in a single RGB row: the red
// channel leaks against the average of green+blue (the cyan eye), while green
// and blue leak against red.  Same square -> blend toward white -> lazy-sqrt
// pipeline as GhostBust(); operates in place on interleaved 16-bit RGB.
//   ileakL : leak strength applied to red (vs cyan)
//   ileakR : leak strength applied to green/blue (vs red)
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
    int x,R,G,B;
    int nR,nG,nB;
    int max = 1024*1024-1;               // 20-bit full scale
    unsigned short *sqrttable = decoder->sqrttable;
    ileakL>>=6;                          // leak amounts to 10-bit (0..1023)
    ileakR>>=6;
    if(sqrttable == NULL)
        return;                          // sqrt LUT not allocated - nothing to do
    for(x=0;x<width;x++)
    {
        R = sbase[0]>>6;
        G = sbase[1]>>6; //10-bit
        B = sbase[2]>>6;
        R*=R;
        G*=G; //20-bit
        B*=B;
        // red vs the cyan (G+B)/2 average; green/blue vs red
        nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
        nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
        nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
        nR >>= 10; //20-bit
        nG >>= 10;
        nB >>= 10;
        if(nR>max) nR=max; if(nR<0) nR=0;
        if(nG>max) nG=max; if(nG<0) nG=0;
        if(nB>max) nB=max; if(nB<0) nB=0;
        // Lazily populate the sqrt LUT (65535 marks an empty slot).
        if(sqrttable[nR] == 65535)
            sqrttable[nR] = (int)sqrt(nR);
        if(sqrttable[nG] == 65535)
            sqrttable[nG] = (int)sqrt(nG);
        if(sqrttable[nB] == 65535)
            sqrttable[nB] = (int)sqrt(nB);
        sbase[0] = sqrttable[nR]<<6;     // back to 16-bit range
        sbase[1] = sqrttable[nG]<<6;
        sbase[2] = sqrttable[nB]<<6;
        sbase += 3;
    }
#elif 0
    // Disabled floating-point reference implementation of the same math.
    int x;
    float R,G,B;
    float nR,nG,nB;
    float fleakL = (float)ileakL / 65535.0;
    float fleakR = (float)ileakR / 65535.0;
    for(x=0;x<width;x++)
    {
        R = sbase[0];
        G = sbase[1];
        B = sbase[2];
        R /= 65535.0;
        G /= 65535.0;
        B /= 65535.0;
        R *= R;
        G *= G;
        B *= B;
        nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
        nG = G*(1.0-fleakR) + fleakR - R*fleakR;
        nB = B*(1.0-fleakR) + fleakR - R*fleakR;
        if(nR<0) nR=0;
        if(nG<0) nG=0;
        if(nB<0) nB=0;
        nR = sqrt(nR);
        nG = sqrt(nG);
        nB = sqrt(nB);
        sbase[0] = nR * 65535.0;
        sbase[1] = nG * 65535.0;
        sbase[2] = nB * 65535.0;
        sbase += 3;
    }
#elif 0
    // Disabled SSE/SSE2 implementation: processes two RGB pixels per iteration
    // in single-precision float, duplicating the red/cyan math above.
    __m128i RGBRGB,rgb_epi32,RGB1,RGB2;
    __m128i zero_epi128 = _mm_setzero_si128();
    int x,width6 = (width*3) / 6 * 6;
    __m128 white_ps = _mm_set1_ps(1.0);
    __m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
    __m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
    __m128 scale_ps = _mm_set1_ps(65535.0);
    __m128 scalehalf_ps = _mm_set1_ps(32767.0);
    __m128 zero_ps = _mm_set1_ps(0.0);
    __m128 rgb_ps, alt_rgb_ps;
    __m128i sub_epi32;
    __m128 sub_ps;
    for(x=0;x<width6;x+=6) // two RGB pairs
    {
        int R,G,B;
        RGBRGB = _mm_loadu_si128((__m128i *)sbase);
        R = _mm_extract_epi16(RGBRGB, 0);
        G = _mm_extract_epi16(RGBRGB, 1);
        B = _mm_extract_epi16(RGBRGB, 2);
        G+=B;
        G>>=1;
        sub_epi32 = _mm_set_epi32(G,R,R,G);
        sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
        sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
        sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
        rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
        rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
        rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
        rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
        rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
        rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
        sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
        rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
        rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
        rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
        rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
        RGB1 = _mm_cvtps_epi32(rgb_ps);
        RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
        RGB1 = _mm_slli_si128(RGB1, 10);
        RGB1 = _mm_srli_si128(RGB1, 10);
        RGBRGB = _mm_srli_si128(RGBRGB, 6);
        R = _mm_extract_epi16(RGBRGB, 0);
        G = _mm_extract_epi16(RGBRGB, 1);
        B = _mm_extract_epi16(RGBRGB, 2);
        G+=B;
        G>>=1;
        sub_epi32 = _mm_set_epi32(G,R,R,G);
        sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
        sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
        sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
        rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
        rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
        rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
        rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
        rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
        rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
        sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
        rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
        rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
        rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
        rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
        RGB2 = _mm_cvtps_epi32(rgb_ps);
        RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
        RGB2 = _mm_slli_si128(RGB2, 6);
        RGB1 = _mm_adds_epi16(RGB1, RGB2);
        RGB1 = _mm_slli_epi16(RGB1, 1);
        RGB1 = _mm_slli_si128(RGB1, 4);
        RGB1 = _mm_srli_si128(RGB1, 4);
        RGBRGB = _mm_srli_si128(RGBRGB, 6);
        RGBRGB = _mm_slli_si128(RGBRGB, 12);
        RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
        _mm_storeu_si128((__m128i *)sbase, RGBRGB);
        sbase += 6;
    }
#endif
}
// Ghost-bust for an amber/blue anaglyph packed in one RGB row: red and green
// (the amber eye) leak against blue, while blue leaks against the average of
// red+green.  Each component is squared (approximate linearization), blended
// toward white by its leak amount minus the opposing eye's contribution, then
// mapped back through a lazily-filled square-root lookup table.  The row is
// modified in place.
//   sbase          - interleaved 16-bit RGB row
//   ileakL/ileakR  - leak strengths for the amber and blue eyes (>>6 to 10-bit)
// decoder->sqrttable uses 65535 as the "slot not yet computed" sentinel.
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
    const int peak = 1024*1024-1;            // 20-bit full scale
    unsigned short *roots = decoder->sqrttable;
    int col;

    if (roots == NULL)
        return;                              // no sqrt LUT - leave the row untouched

    ileakL >>= 6;                            // leak amounts down to 10-bit (0..1023)
    ileakR >>= 6;

    for (col = 0; col < width; col++, sbase += 3)
    {
        int red = sbase[0] >> 6;             // 10-bit components
        int grn = sbase[1] >> 6;
        int blu = sbase[2] >> 6;
        int outR, outG, outB;

        red *= red;                          // square -> 20-bit (approximate linear light)
        grn *= grn;
        blu *= blu;

        // new = old*(1023-leak) + leak*white - other_eye*leak, then >>10 back to 20-bit
        outR = (red*(1023 - ileakL) + ileakL*peak - blu*ileakL) >> 10;
        outG = (grn*(1023 - ileakL) + ileakL*peak - blu*ileakL) >> 10;
        outB = (blu*(1023 - ileakR) + ileakR*peak - ((red + grn) >> 1)*ileakR) >> 10;

        if (outR > peak) outR = peak; else if (outR < 0) outR = 0;
        if (outG > peak) outG = peak; else if (outG < 0) outG = 0;
        if (outB > peak) outB = peak; else if (outB < 0) outB = 0;

        // Fill the sqrt LUT on demand (65535 marks an empty slot).
        if (roots[outR] == 65535) roots[outR] = (int)sqrt(outR);
        if (roots[outG] == 65535) roots[outG] = (int)sqrt(outG);
        if (roots[outB] == 65535) roots[outB] = (int)sqrt(outB);

        sbase[0] = roots[outR] << 6;         // back to the 16-bit range
        sbase[1] = roots[outG] << 6;
        sbase[2] = roots[outB] << 6;
    }
}
// Ghost-bust for a green/magenta anaglyph packed in one RGB row: red and blue
// (the magenta eye) leak against green, while green leaks against the average
// of red+blue.  Uses the same square -> blend-toward-white -> lazy-sqrt
// pipeline as the other GhostBust variants; the row is modified in place.
//   sbase          - interleaved 16-bit RGB row
//   ileakL/ileakR  - leak strengths for the magenta and green eyes (>>6 to 10-bit)
// decoder->sqrttable uses 65535 as the "slot not yet computed" sentinel.
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
    const int top = 1024*1024-1;             // 20-bit full scale
    unsigned short *table = decoder->sqrttable;
    int i = 0;

    if (table == NULL)
        return;                              // no sqrt LUT - nothing to do

    ileakL >>= 6;                            // scale leaks to 10-bit (0..1023)
    ileakR >>= 6;

    while (i < width)
    {
        int r = sbase[0] >> 6;               // 10-bit components
        int g = sbase[1] >> 6;
        int b = sbase[2] >> 6;
        int newR, newG, newB;

        r *= r;                              // square -> 20-bit (approximate linear light)
        g *= g;
        b *= b;

        // new = old*(1023-leak) + leak*white - other_eye*leak, then >>10 back to 20-bit
        newR = (r*(1023 - ileakL) + ileakL*top - g*ileakL) >> 10;
        newG = (g*(1023 - ileakR) + ileakR*top - ((r + b) >> 1)*ileakR) >> 10;
        newB = (b*(1023 - ileakL) + ileakL*top - g*ileakL) >> 10;

        if (newR > top) newR = top; else if (newR < 0) newR = 0;
        if (newG > top) newG = top; else if (newG < 0) newG = 0;
        if (newB > top) newB = top; else if (newB < 0) newB = 0;

        // Fill the sqrt LUT on demand (65535 marks an empty slot).
        if (table[newR] == 65535) table[newR] = (int)sqrt(newR);
        if (table[newG] == 65535) table[newG] = (int)sqrt(newG);
        if (table[newB] == 65535) table[newB] = (int)sqrt(newB);

        sbase[0] = table[newR] << 6;         // back to the 16-bit range
        sbase[1] = table[newG] << 6;
        sbase[2] = table[newB] << 6;
        sbase += 3;
        i++;
    }
}
void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank)
{
uint16_t *scratchline,*scratchline2,*scratchline3;
uint16_t *sptr;
uint16_t *srclineA,*srclineB;
uint16_t *dstlineA,*dstlineB;
int x,y2;
int width = decoder->frame.width;
int height = decoder->frame.height;
int skip = 3;
int sskip = 3;
uint8_t *bptr1;
uint8_t *bptr2;
uint8_t *baseptr1;
uint8_t *baseptr2;
float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL;
float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR;
float frameTilt = decoder->cfhddata.channel[0].FrameTilt;
float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset;
float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset;
float rotOffset = decoder->cfhddata.channel[1].RotationOffset;
float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset;
float horizOffsetStep = 0;
float horizOffsetStepR = 0;
int flip1=0,flip2=0;
int channel_flip = decoder->cfhddata.channel_flip;
int source_pitch1 = source_pitch;
int source_pitch2 = source_pitch;
uint8_t *outputline = output+y*pitch;
uint8_t *outputline2 = NULL;
float horizOffsetBase;
float rotOffsetBase;
float horizOffsetBaseR;
float rotOffsetBaseR;
int formatdone = 0;
float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
float zoom;
float zoomR;
float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom;
float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom;
float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom;
float frameDiffZoom1 = decoder->cfhddata.channel[1].FrameDiffZoom;
float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom;
float frameHDynamic = decoder->cfhddata.FrameHDynamic;
float frameHDynCenter = decoder->cfhddata.FrameHDynCenter;
float frameHDynWidth = decoder->cfhddata.FrameHDynWidth;
float frameHScale = decoder->cfhddata.FrameHScale;
int alphachannel = 0;
int whitepoint = 16;
float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen;
float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen;
float vignette = decoder->cfhddata.channel[0].user_vignette_start;
int flip_LR = 0;
float vig_r1;
float vig_r2;
float vig_gain;
if(blank) // blankline, no shifts required
{
windowMaskL = 0;
windowMaskR = 0;
frameTilt = 0;
horizOffset = 0;
horizOffsetR = 0;
rotOffset = 0;
rotOffsetR = 0;
frameZoom1 = 1.0;
frameZoom2 = 1.0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
frameHScale = 1.0;
frameHDynamic = 1.0;
frameHDynCenter = 0.5;
frameHDynWidth = 0.0;
}
if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
alphachannel = 1;
if(xmax == 0.0) xmax = 1.0;
if(ymax == 0.0) ymax = 1.0;
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
width *= 2;
}
if(decoder->source_channels < 2) // 2D
{
channel_flip &= 0x3;
channel_flip |= channel_flip<<2;
decoder->cfhddata.channel_flip = channel_flip;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
{
blursharpenL = 0.0;
blursharpenR = 0.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
horizOffset = rotOffset = 0;
horizOffsetR = rotOffsetR = 0;
frameTilt = 0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
horizOffset += decoder->cfhddata.FrameOffsetX;
horizOffsetR -= decoder->cfhddata.FrameOffsetX;
frameZoom1 += frameHScale - 1.0f;
frameZoom2 += frameHScale - 1.0f;
if(frameHDynamic != 1.0)
{
frameZoom1 += 0.00001f;
frameZoom2 += 0.00001f;
}
if(vignette != 0.0)
{
float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width));
vig_r1 = (vignette+1.0f);
vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f);
vig_gain = decoder->cfhddata.channel[0].user_vignette_gain;
vig_r1 *= vig_diag;
vig_r2 *= vig_diag;
}
}
else
{
frameZoom1 = 1.0f;
frameZoom2 = 1.0f;
vignette = 0;
}
zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1;
if(frameDiffZoom2 != 0.0)
zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2;
else
zoomR = 0.0;
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
if(decoder->cfhddata.InvertOffset)
{
rotOffset = -rotOffset;
rotOffsetR = -rotOffsetR;
rotOffset -= decoder->cfhddata.FrameOffsetR;
rotOffsetR -= -decoder->cfhddata.FrameOffsetR;
}
else
{
rotOffset += decoder->cfhddata.FrameOffsetR;
rotOffsetR += -decoder->cfhddata.FrameOffsetR;
}
}
rotOffsetBase = rotOffset;
horizOffsetBase = horizOffset;
rotOffsetBaseR = rotOffsetR;
horizOffsetBaseR = horizOffsetR;
horizOffset -= rotOffset * 0.5f;
horizOffsetStep = rotOffset / (float)height;
horizOffsetR -= rotOffsetR * 0.5f;
horizOffsetStepR = rotOffsetR / (float)height;
horizOffset += horizOffsetStep * y;
horizOffsetR += horizOffsetStepR * y;
assert(bufferremain >= width * 8 * 2 * 2);
baseptr1 = source_buffer;
baseptr2 = source_buffer + channel_offset;
if(channel_flip & 0xf)
{
if(channel_flip & 1)
{
flip1 = 1;
}
if(channel_flip & 4)
{
flip2 = 1;
}
}
if(source_pitch1 < 0)
flip_LR = 1;
decoder->sharpen_flip = 0;
if(channel_flip & 2) //ProcessLine3D
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
}
else
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
}
if(channel_flip & 8)
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
else
{
baseptr2 += source_pitch2*(height-1);
source_pitch2 = -source_pitch2;
}
}
bptr1 = baseptr1 + y*source_pitch1;
bptr2 = baseptr2 + y*source_pitch2;
y2 = y;
if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView
{
if(y2 < height/4)
{
blank = 1;
y2 = 0;
}
else
{
y2 -= height/4;
y2 *= 2;
if(y2 >= height-1)
{
blank = 1;
y2 = height - 2;
}
}
bptr1 = baseptr1 + y2*source_pitch1;
bptr2 = baseptr2 + y2*source_pitch2;
}
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ;
if(alphachannel)
{
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ;
}
dstlineA = sptr = scratchline;
dstlineB = scratchline3;
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RG64:
whitepoint = 16;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_W13A:
whitepoint = 13;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_WP13:
whitepoint = 13;
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RG48:
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RGB32:
skip = 4;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
break;
}
if(blank)
{
if(srclineA)
memset(srclineA, 0, width*skip);
if(srclineB && decoder->channel_decodes > 1)
memset(srclineB, 0, width*skip);
}
if(blursharpenL != 0.0 || blursharpenR != 0.0)
{
if(decoder->channel_blend_type == BLEND_FREEVIEW ||
decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
decoder->channel_blend_type == BLEND_LINE_INTERLEAVED
)
{
decoder->doVerticalFilter = 0;
}
else
{
decoder->doVerticalFilter = 1;
}
}
{
switch(decoder->channel_blend_type)
{
case BLEND_FREEVIEW:
case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side
if(!blank)
{
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
dstlineA = srclineA;
sptr = dstlineA;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2);
}
else
{
int16_t *ptr;
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(!alphachannel)
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
dstlineA = srclineA;
ptr = (int16_t *)srclineA;
for(x=0; x<width/2; x++)
{
*ptr++ = (ptr1[0]+ptr1[3])>>1;
*ptr++ = (ptr1[1]+ptr1[4])>>1;
*ptr++ = (ptr1[2]+ptr1[5])>>1 ;
ptr1+=sskip*2;
}
for(; x<width; x++)
{
*ptr++ = (ptr2[0]+ptr2[3])>>1;
*ptr++ = (ptr2[1]+ptr2[4])>>1;
*ptr++ = (ptr2[2]+ptr2[5])>>1;
ptr2+=sskip*2;
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, dstlineA, width/2, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2);
memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2);
memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2);
}
}
break;
case BLEND_STACKED_ANAMORPHIC: //stacked
case BLEND_LINE_INTERLEAVED: //fields
if((y & 1) == 1) return;
if(!blank)
{
uint16_t *ptrA1 = (uint16_t *)srclineA;
uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1);
uint16_t *ptrB1 = (uint16_t *)srclineB;
uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1);
FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip);
FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip);
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->doVerticalFilter == 0)
{
if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline2 = output+(y>>1)*pitch;
outputline = output+((y>>1)+(height/2))*pitch;
}
else
{
outputline = output+(y>>1)*pitch;
outputline2 = output+((y>>1)+(height/2))*pitch;
}
}
else //fields
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline = output+(y)*pitch;
outputline2 = output+(y+1)*pitch;
}
else
{
outputline2 = output+(y)*pitch;
outputline = output+(y+1)*pitch;
}
}
if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right
{
uint8_t *tmp = outputline2;
outputline2 = outputline;
outputline = tmp;
}
}
else
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2, srclineA, width*skip);
memcpy(srclineA, srclineB, width*skip);
memcpy(srclineB, scratchline2, width*skip);
}
}
}
break;
case BLEND_ONION: //onion
case BLEND_DIFFERENCE: //difference
case BLEND_SPLITVIEW: //splitView
if(!blank)
{
//dstlineA = source_buffer;
//dstlineA += (source_pitch>>1) * y;
sptr = dstlineA = srclineA;
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
x = 0;
if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view
{
int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255;
for(x = xsplit*sskip; x<width*sskip; x++)
{
srclineA[x] = srclineB[x];
}
}
else if(decoder->channel_blend_type == BLEND_ONION) //onion
{
FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip);
}
else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference
{
#if XMMOPT
int width8 = (width*sskip) & 0xfff8;
__m128i mid_epi16;
//int unaligned = ((int)sbase) & 15;
//unaligned += ((int)in_rgb8) & 15;
if(whitepoint == 13)
mid_epi16 = _mm_set1_epi16(0x0fff);
else
mid_epi16 = _mm_set1_epi16(0x1fff);
for(x=0; x<width8; x+=8)
{
__m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]);
__m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]);
// 0 to 0xffff
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff
}
else
{
rgb16A = _mm_subs_epi16(rgb16A, rgb16B);
}
rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff
_mm_store_si128((__m128i *)&dstlineA[x], rgb16A);
}
#endif
for(; x<width*sskip; x++)
{
int val;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
val = (srclineB[x] - srclineA[x]) + 32768;
}
else
{
val = (srclineA[x] - srclineB[x]) + 32768;
}
if(val > 0x7fff) val = 0x7fff;
if(val < 0) val = 0;
dstlineA[x] = val;
}
}
}
break;
case BLEND_ANAGLYPH_RC:
case BLEND_ANAGLYPH_RC_BW:
case BLEND_ANAGLYPH_AB:
case BLEND_ANAGLYPH_AB_BW:
case BLEND_ANAGLYPH_GM:
case BLEND_ANAGLYPH_GM_BW:
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
uint16_t *sptr1 = scratchline2;
uint16_t *sptr2 = scratchline3;
dstlineA = (uint16_t *)bptr1;
// dstlineA += (source_pitch>>1) * y;
sptr = dstlineA;
sptr1 = srclineA = (uint16_t *)bptr1;
sptr2 = srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
uint16_t *tmp = srclineA;
srclineA = srclineB;
srclineB = tmp;
}
switch(decoder->channel_blend_type)
{
case BLEND_ANAGLYPH_RC:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_RC_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
int r,g,b;
for(x=0; x<width; x++)
{
r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000;
g =(ptr1[0]*-40 + ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000;
b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000;
if(r<0) r=0; if(r>0x3fff) r=0x3fff;
if(g<0) g=0; if(g>0x3fff) g=0x3fff;
if(b<0) b=0; if(b>0x3fff) b=0x3fff;
sptr[0] = r;
sptr[1] = g;
sptr[2] = b;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
break;
}
}
break;
case BLEND_NONE:
default:
if(decoder->channel_decodes == 1) // only one channel
{
if(skip == 8)
{
//the data is already in the correct format
sptr = (unsigned short *)bptr1;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else if(skip == 6)
{
//the data is already in the correct format
dstlineA = sptr = (unsigned short *)srclineA;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
}
if(decoder->channel_current == 0)
{
if(blursharpenL != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
}
}
else
{
if(blursharpenR != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip);
}
}
}
if ((windowMaskL && decoder->channel_current == 0) || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
if(decoder->channel_current != 0) mask = xmin;
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
RGB48WindowMask(decoder, srclineA, width, 0, mask);
}
if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
if(decoder->channel_current != 1) mask = (1.0f-xmax);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR);
RGB48WindowMask(decoder, srclineA, width, 1, mask);
}
}
else
{
outputline2 = output+(y+height)*pitch;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
else
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
}
break;
}
}
if(!formatdone)
{
int flags = ACTIVEMETADATA_PRESATURATED;
int whitebitdepth = 16;
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
flags = 0;
whitebitdepth = 13;
}
if(outputline2)
{
// if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
else
{
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
//{
// if(alphachannel)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth);
// else
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
//}
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
}
}
// Apply the optional vertical sharpen/blur pass to one scan line of the
// intermediate stereo buffer (local_output) and convert that line into the
// caller's output frame format.
//
// Parameters:
//   decoder        - decoder state; supplies frame geometry, stereo buffer
//                    format, per-channel sharpen amounts, and blend type.
//   buffer         - per-thread scratch area; each thread gets a slice of
//                    width*skip bytes at offset width*skip*thread_index.
//   bufferremain   - unused in this function (kept for interface compatibility).
//   output         - destination frame base pointer.
//   pitch          - destination row pitch in bytes.
//   local_output   - intermediate (stereo-blended) buffer holding the source lines.
//   local_pitch    - row pitch of local_output in bytes; 0 disables the filter path.
//   channel_offset - byte offset to the second channel's line within an
//                    interleaved pair (used by BLEND_LINE_INTERLEAVED).
//   y              - scan line index within the frame.
//   thread_index   - index of the calling worker thread (selects scratch slice).
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	int skip = 3;	// bytes per pixel in the stereo buffer; refined by the switch below
	//int flip1=0;//flip2=0;
	// NOTE(review): channel_flip is computed (and cleared when image flips are
	// disabled) but never read by live code in this function.
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;	// per-thread line buffer that receives the filtered output
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;	// set when the buffer format carries an alpha plane
	float blursharpen = 0;	// vertical sharpen (>0) / blur (<0) amount for this channel
	int line_max = decoder->frame.height;	// line count used for vertical flip addressing
	int yy = y;

	// Pick the sharpen amount for the current eye. Indices 1/2 are the
	// left/right channel metadata slots (slot 0 is the combined channel).
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Sharpening is suppressed when color-matrix processing is off or at
	// reduced decode resolutions.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	// Vertical flip: write this line mirrored about the frame midpoint.
	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	// Default source line: row y of the intermediate buffer.
	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;

	// Bytes per pixel for each intermediate buffer format.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Each worker thread filters into its own width*skip slice of 'buffer'.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		// Only the 13-bit white point formats go through the vertical filter.
		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;	// flags proximity to top/bottom so the filter can back off
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Five source rows (two above, current, two below) for the
				// vertical filter tap; clamped to the current row at frame edges.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;

				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// Stacked layout: each output line maps to every other
					// buffer row, so taps step by 2 rows and pitch doubles.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				case BLEND_LINE_INTERLEAVED:
					// Interleaved fields: odd lines come from the even row
					// above (note y is decremented in place and the modified
					// value is also used for the output conversion below);
					// even lines use the second channel at channel_offset.
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				default:
					// Normal progressive layout: taps step by single rows.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				// skip==8 means W13A (RGBA, 4 components); otherwise WP13 (RGB).
				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}

				// The filtered line, not the raw buffer row, is what gets converted.
				sbase = (uint16_t *)scratch;
			}
		}

		// Convert the (possibly filtered) line into the caller's pixel format.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
	}
}
#if _GRAPHICS
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
int x,y,v,width, height;
int maxR=0,maxG=0,maxB=0;
width = decoder->frame.width;
height = decoder->frame.height;
if(decoder->cfhddata.BurninFlags == 0)
return;
if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
{
if(decoder->tools == NULL)
{
#if _ALLOCATOR
decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
if(decoder->tools)
{
memset(decoder->tools, 0, sizeof(ToolsHandle));
}
else
{
return;
}
}
}
decoder->frame.output_format = output_format;
#if _THREADED && 1
if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits;
#if _DELAY_THREAD_START
if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
{
int avgR=0,avgG=0,avgB=0;
// Post a message to the mailbox
mailbox->output = output;
if(height >= 1080)
{
mailbox->pitch = pitch*4; // only read every 4th scan line
workunits = height/4; // only read every 4th scan line
}
else if(height >= 540)
{
mailbox->pitch = pitch*2; // only read every 2th scan line
workunits = height/2; // only read every 2th scan line
}
else
{
mailbox->pitch = pitch; // read every scan line
workunits = height; // read every scan line
}
if(decoder->tools->histogram == 0)
{
mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
for(x=0;x<256;x++)
{
avgR += decoder->tools->histR[x];
avgG += decoder->tools->histG[x];
avgB += decoder->tools->histB[x];
//if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
//if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
//if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
}
avgR /= 256;
avgG /= 256;
avgB /= 256;
//maxR++;
//maxG++;
//maxB++;
decoder->tools->maxR = avgR*3;//maxR;
decoder->tools->maxG = avgG*3;//maxG;
decoder->tools->maxB = avgB*3;//maxB;
}
}
#endif
if(decoder->cfhddata.BurninFlags && DrawOpen(decoder))
{
if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
{
#if _THREADED
//DrawInit(decoder);
//DrawStartThreaded(decoder);
if(decoder->draw_thread.pool.thread_count > 0)
{
DrawWaitThreaded(decoder);
}
else
#endif
{
DrawInit(decoder);
DrawMetadataObjects(decoder);
}
}
else
{
DrawInit(decoder);
}
if(decoder->drawSafeMarkers)
DrawSafeMarkers(decoder);
if(decoder->cfhddata.BurninFlags & 2) // tools
{
if(decoder->tools)
{
if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16)
DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/);
if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2)
DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/);
if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4)
DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/);
if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8)
DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/);
}
}
DrawScreen(decoder, output, pitch, output_format);
}
#if 0
#if _THREADED && 1
if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits;
int targetW, targetH;
if(width < 256 || height < 256)
return;
targetW = width / 4;
targetH = height / 8;
mailbox->output = output;
mailbox->pitch = pitch;
workunits = targetW;
mailbox->jobType = JOB_TYPE_BURNINS; // burnin
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
#else
if(decoder->histogram == 0)
{
for(y=0; y<height; y+=4)
{
uint8_t *bptr = output;
bptr += pitch * y;
HistogramLine(decoder, (unsigned short *)bptr, width, output_format);
if(decoder->histogram == 0)
return; // don't know how to create Histogram for that format
}
}
for(x=1;x<255;x++)
{
if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
}
maxR++;
maxG++;
maxB++;
decoder->maxR = maxR;
decoder->maxG = maxG;
decoder->maxB = maxB;
for(x=0; x<targetW; x++)
{
HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
}
#endif
#endif
if(decoder->tools)
memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif
extern int geomesh_alloc_cache(void *gm);
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)
// Compare two integer dimensions for approximate equality.
// Both values are coarsened by a right shift whose width scales with y
// (larger frames tolerate larger absolute differences), then the shifted
// values must be equal or differ by exactly one.
// Used to classify frame aspect ratios (2:1, 4:3, 16:9).
bool approx_equal(int x, int y)
{
    int shift;

    // Coarser buckets for larger dimensions.
    if (y > 1080)
        shift = 6;
    else if (y > 540)
        shift = 5;
    else
        shift = 4;

    const int xs = x >> shift;
    const int ys = y >> shift;

    return (xs == ys) || (xs + 1 == ys) || (xs == ys + 1);
}
// True when y lies strictly inside the open interval (0.99*x, 1.01*x),
// i.e. y is within roughly +-1% of x.  The tolerance is relative to x
// (asymmetric), and the comparison is strict, so x == y == 0 is NOT
// considered approximately equal.  Callers pass positive aspect ratios.
bool approx_equal_float(float x, float y)
{
    return (x*0.99 < y) && (y < x*1.01);
}
#if WARPSTUFF
// Apply a lens-geometry warp (fisheye correction, lens reprojection, and
// pan/zoom/rotate offsets) to one decoded frame in place.
//
// The warp is described by a geometry mesh (decoder->mesh).  The mesh is
// rebuilt only when any of the lens parameters in cfhddata changed since the
// previous frame; otherwise the cached mesh is reused.  The warp itself is
// executed by the worker-thread pool (JOB_TYPE_WARP) into
// decoder->lens_correct_buffer, which is then copied back over 'output'.
//
// Parameters:
//   decoder       - decoder state (mesh cache, thread pool, scratch buffer)
//   output        - frame buffer, read and overwritten in place
//   pitch         - row pitch of 'output' in bytes
//   output_format - COLOR_FORMAT_* of 'output' (top bit masked off below)
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int width, height;
    //int maxR = 0, maxG = 0, maxB = 0;
    int status = WARPLIB_SUCCESS;
    CFHDDATA *cfhddata = &decoder->cfhddata;
    int backgroundfill = cfhddata->lensFill;
    float sensorcrop = 1.0;
    float phi, theta, rho;
    int srcLens = HERO4;

    if (!cfhddata->doMesh) return;

    // Rebuild the mesh only when a lens parameter changed since the last frame.
    if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
        decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
        decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
        decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
        decoder->lastLensZoom != cfhddata->LensZoom ||
        decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
        decoder->lastLensGoPro != cfhddata->lensGoPro ||
        decoder->lastLensSphere != cfhddata->lensSphere ||
        decoder->lastLensFill != cfhddata->lensFill ||
        decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
        memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
        memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
    {
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);

        width = decoder->frame.width;
        height = decoder->frame.height;

        // Select mesh density, source-lens model, and the pointing ranges
        // (phi/theta/rho) from the frame's approximate aspect ratio.
        // NOTE(review): geomesh_create() return values are not checked for
        // NULL before use below -- confirm allocation failure is handled
        // (or impossible) in this build.
        if (approx_equal(width, height * 2)) // approx. 2:1
        {
            float outputaspect = 16.0f/9.0f;
            srcLens = EQUIRECT;
            sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
            if (cfhddata->lensCustomSRC[1])
            {
                outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
                if (outputaspect >= 1.0f && outputaspect <= 3.0f)
                {
                    //float sourceratio = (float)width / (float)height;
                    if (approx_equal_float(outputaspect, 4.0f / 3.0f))
                        sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
                    if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
                        sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
                }
            }
            // Denser meshes for larger frames.
            if (width >= 2496)
                decoder->mesh = geomesh_create(199, 99);
            else if (width >= 1272)
                decoder->mesh = geomesh_create(99, 49);
            else
                decoder->mesh = geomesh_create(49, 25);
            phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
            theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }
        else if (approx_equal(width * 3, height * 4)) // approx. 4:3
        {
            srcLens = HERO4;
            sensorcrop = 1.0;
            if (width > 2880) // UHD
                decoder->mesh = geomesh_create(159, 119);
            else if (width >= 1920) //HD/2.7K
                decoder->mesh = geomesh_create(79, 59);
            else
                decoder->mesh = geomesh_create(39, 29);
            phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
            theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }
        else //if(approx_equal(width*9,height*16)) // approx. 16:9
        {
            srcLens = HERO4;
            sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
            if (width > 2880) // UHD
                decoder->mesh = geomesh_create(159, 119);
            else if (width >= 1920) //HD/2.7K
                decoder->mesh = geomesh_create(79, 59);
            else
                decoder->mesh = geomesh_create(39, 29);
            phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
            theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }

        // Map the decoder's output color format onto the warp library's
        // pixel-format enum; source and destination share size and format.
        if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
        else
            assert(0); // unsupported output format for warping

        if (cfhddata->lensSphere == 1)
        {
            // Spherical (lens-model) path: compose rotation/zoom/defish and
            // then a lens-to-lens reprojection.
            if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
            {
                if (cfhddata->LensOffsetR != 0.0)
                {
                    //float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
                    float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
                    if (cfhddata->LensOffsetR < 0.0) angle = -angle;
                    geomesh_transform_rotate(decoder->mesh, angle);
                }
                if (cfhddata->LensZoom != 1.0)
                    geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);
                if (cfhddata->LensFishFOV != 0.0) // DeFish
                {
                    float fov = cfhddata->LensFishFOV;// *180.0;
                    if (fov > 89.9f) fov = 89.9f;   // clamp to just under +-90deg
                    if (fov < -89.9f) fov = -89.9f;
                    if (fov)
                        status |= geomesh_transform_defish(decoder->mesh, fov);
                }
            }
            // lensGoPro selects the destination lens model.
            switch (cfhddata->lensGoPro)
            {
            case 0: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR); break;
            case 1: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4); break;
            case 2: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT); break;
            case 4:
                geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
                if (srcLens == EQUIRECT) geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
                else geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
                break;
            }
        }
        else // old boring geometry
        {
            // Planar path: simple orthographic scale/pan/rotate.
            if (cfhddata->LensZoom != 1.0)
                geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);
            // basic orthographic moves
            if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
                geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);
            if (cfhddata->LensOffsetR != 0.0)
            {
                float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
                geomesh_transform_rotate(decoder->mesh, angle);
            }
            if (cfhddata->lensGoPro == 0) //Rectilear
                status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);
            //status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
            //  width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
        }

        geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

        if (status == WARPLIB_SUCCESS)
        {
            // Allocate the scratch frame that receives the warped image.
            if (decoder->lens_correct_buffer == NULL)
            {
#if _ALLOCATOR
                decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
                decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
            }
        }
        else
        {
            return; // mesh setup failed; leave the frame unwarped
        }

        /* need resources?
        {
            if(decoder->tools == NULL)
            {
#if _ALLOCATOR
                decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
                decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
                if(decoder->tools)
                {
                    memset(decoder->tools, 0, sizeof(ToolsHandle));
                }
                else
                {
                    return;
                }
            }
        }
        */

        // Precompute the mesh interpolation cache in parallel
        // (JOB_TYPE_WARP_CACHE) so the per-frame warp below is cheap.
#if _THREADED && 1
        {
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
            int workunits = decoder->frame.height;
#if _DELAY_THREAD_START
            if (decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                    decoder->thread_cntrl.capabilities >> 16,
                    WorkerThreadProc,
                    decoder);
            }
#endif
            {
                // Post a message to the mailbox
                mailbox->data = decoder->mesh;
                mailbox->output = output;
                mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
                mailbox->line_max = decoder->frame.height;
                mailbox->chunk_size = 16; // rows per work unit
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
                mailbox->jobType = JOB_TYPE_WARP_CACHE;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
        }
#endif
        //decoder->frame.output_format = output_format;

        // Remember the parameters used so the mesh can be reused next frame.
        decoder->lastLensOffsetX = cfhddata->LensOffsetX;
        decoder->lastLensOffsetY = cfhddata->LensOffsetY;
        decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
        decoder->lastLensOffsetR = cfhddata->LensOffsetR;
        decoder->lastLensZoom = cfhddata->LensZoom;
        decoder->lastLensFishFOV = cfhddata->LensFishFOV;
        decoder->lastLensGoPro = cfhddata->lensGoPro;
        decoder->lastLensSphere = cfhddata->lensSphere;
        decoder->lastLensFill = cfhddata->lensFill;
        decoder->lastLensStyleSel = cfhddata->lensStyleSel;
        memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
        memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
    }

    // Apply the (possibly cached) mesh: warp 'output' into lens_correct_buffer.
#if _THREADED && 1
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits = decoder->frame.height;
        mailbox->data = decoder->mesh;
        mailbox->output = output;
        mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
        mailbox->line_max = decoder->frame.height;
        mailbox->chunk_size = 16;
        workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
        mailbox->jobType = JOB_TYPE_WARP;
        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

        if(backgroundfill) // may need to blur the filled in areas
        {
            // Second pass: vertical blur over the background-filled columns.
            mailbox->data = decoder->mesh;
            mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
            mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
            mailbox->line_max = decoder->frame.width;
            mailbox->chunk_size = 16;
            mailbox->pitch = pitch;
            workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
            mailbox->jobType = JOB_TYPE_WARP_BLURV;
            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
    }
#else // not threading
    {
        //geomesh_cache_init_bilinear(decoder->mesh); //bad
        geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
        geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
    }
#endif

    // Copy the warped image back over the caller's buffer.
    // NOTE(review): lens_correct_buffer is not checked for NULL here -- the
    // allocation above may have failed, and on the cached-mesh path the
    // allocation step is skipped entirely.  Confirm a prior frame always
    // allocated it before relying on this copy.
    memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

    /*
    if(lens_correct_buffer)
#if _ALLOCATOR
        Free(decoder->allocator, lens_correct_buffer);
#else
        MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
    geomesh_destroy(mesh);
    */
}
// Black out (mask) the regions of the frame outside the crop rectangle given
// by cfhddata->LensXmin/LensXmax/LensYmin/LensYmax (normalized 0..1).
//
// RGB formats are cleared to zero with memset.  Packed 8-bit YUV formats are
// filled with the byte pair representing black for that component ordering.
// 16-bit YUV (YU64) selects fill values but has no 16-bit fill pass, so the
// frame is left untouched for that format.
//
// Parameters:
//   decoder       - decoder state (frame dimensions, mask rectangle)
//   output        - frame buffer, modified in place
//   pitch         - row pitch of 'output' in bytes
//   output_format - COLOR_FORMAT_* of 'output' (top bit masked off below)
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int x, y, width, height;
    int minY, maxY;
    int minX, maxX;
    CFHDDATA *cfhddata = &decoder->cfhddata;
    uint8_t *line = output;
    uint32_t fillA = 0;
    uint32_t fillB = 0;
    int bitsize = 8;

    if (!cfhddata->doMesh) return;

    width = decoder->frame.width;
    height = decoder->frame.height;

    // An all-zero or full-frame rectangle means "no mask".
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0) return;
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0) return;

    minY = (int)(decoder->cfhddata.LensYmin*(float)height);
    maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
    // X limits are scaled by the byte pitch and rounded down to a multiple
    // of 4 so packed YUV macropixel pairs are never split.
    minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
    maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

    if (FORMATRGB(output_format))
    {
        // RGB black is all-zero bytes, so a plain memset suffices.
        line = output;
        // Top rows
        for (y = 0; y < minY; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }
        // Left and right edges of the middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                memset(line, 0, minX);
                memset(line + maxX, 0, pitch - maxX);
                line += pitch;
            }
        }
        // Bottom rows
        y = maxY;
        line = output + y*pitch;
        for (; y < height; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }
    }
    else
    {
        // Packed YUV: select the alternating byte pair that represents black
        // for this component ordering (0x10 = luma black, 0x80 = neutral chroma).
        switch (output_format & 0x7fffffff)
        {
        case COLOR_FORMAT_YVYU:
        case COLOR_FORMAT_YUYV:
            fillA = 0x10;
            fillB = 0x80;
            break;
        case COLOR_FORMAT_UYVY:
        case COLOR_FORMAT_2VUY:
            fillA = 0x80;
            fillB = 0x10;
            break;
        case COLOR_FORMAT_YU64:
            fillA = 0x8000;
            fillB = 0x1000;
            bitsize = 16; // no 16-bit fill pass exists below; YU64 is left unmasked
            break;
        }

        // 8-bit fill pass.  (Previously this pass also ran for RGB formats,
        // redundantly re-writing the already-cleared regions with zeros; it
        // is now restricted to the non-RGB path, output unchanged.)
        if (bitsize == 8)
        {
            line = output;
            // Top rows
            for (y = 0; y < minY; y++)
            {
                for (x = 0; x < pitch; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                line += pitch;
            }
            // Left and right edges of the middle rows
            if (maxX - minX != pitch)
            {
                for (; y < maxY; y++)
                {
                    for (x = 0; x < minX; x += 2)
                    {
                        line[x] = fillA;
                        line[x + 1] = fillB;
                    }
                    for (x = maxX; x < pitch; x += 2)
                    {
                        line[x] = fillA;
                        line[x + 1] = fillB;
                    }
                    line += pitch;
                }
            }
            // Bottom rows
            y = maxY;
            line = output + y*pitch;
            for (; y < height; y++)
            {
                for (x = 0; x < pitch; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                line += pitch;
            }
        }
    }
}
#endif //#if WARPSTUFF
// Convert the decoder's working image (possibly a stereo pair in
// decoder->StereoBuffer) into the caller's output buffer in the requested
// output format.  Handles vertical inversion between internal and output
// formats, per-eye orientation/framing corrections (zoom, keystone, vertical
// offset, rotation, tilt) via JOB_TYPE_VERTICAL_3D, the horizontal 3D/flip
// pass (JOB_TYPE_HORIZONAL_3D), and an optional sharpen pass
// (JOB_TYPE_SHARPEN), dispatching row ranges to the worker-thread pool.
//
// Parameters:
//   decoder        - decoder state (thread pool, stereo buffers, cfhddata)
//   output         - destination frame buffer
//   pitch          - destination row pitch in bytes
//   output_format  - destination COLOR_FORMAT_*
//   local_output   - source (working) image
//   local_pitch    - source row pitch in bytes
//   channel_offset - byte offset between the two stereo channels in the
//                    source image (negative means channels are swapped)
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
    uint8_t *local_output_double = local_output;
    //Frame_Region emptyFrameMask = {0};

    // When a stereo buffer exists, it replaces the working image.
    if(decoder->StereoBuffer)
        local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;

    if(channel_offset < 0) // channel swapped
    {
        channel_offset = -channel_offset;
    }

    // If the internal and output formats disagree on vertical orientation,
    // walk the source bottom-up by starting at the last row and negating
    // the pitch.
    if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
    {
        local_output += local_pitch*(decoder->frame.height-1);
        if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
            local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
        else
            local_output_double = local_output;
        local_pitch = -local_pitch;
    }

    if(FLIPCOLORS(output_format) || output_format & 0x80000000)
    {
        decoder->cfhddata.InvertOffset = 1;
    }
    else
    {
        decoder->cfhddata.InvertOffset = 0;
    }

    decoder->frame.format = output_format;
    //decoder->frame.colorspace = COLOR_SPACE_CG_601;

#if _THREADED
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits;

#if _DELAY_THREAD_START
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                WorkerThreadProc,
                decoder);
        }
#endif

        // Run the vertical 3D/orientation pass only when some orientation or
        // framing parameter is non-neutral.
        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
            (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[1].FrameKeyStone ||
            decoder->cfhddata.channel[1].VerticalOffset ||
            decoder->cfhddata.channel[1].RotationOffset ||
            decoder->cfhddata.channel[1].FrameTilt ||
            decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameKeyStone ||
            decoder->cfhddata.channel[2].VerticalOffset ||
            decoder->cfhddata.channel[2].RotationOffset ||
            decoder->cfhddata.channel[2].FrameTilt))
            ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            (decoder->cfhddata.FrameOffsetY ||
            decoder->cfhddata.FrameOffsetR ||
            // decoder->cfhddata.FrameOffsetX || ||
            decoder->cfhddata.FrameHScale != 1.0 ||
            decoder->cfhddata.FrameHDynamic != 1.0 ||
            decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
        {
            //int x;
            int xbytes, xstep;
            //uint8_t *base = local_output;
            int width, height, chunk_size;
            int fine_vertical = 0;

            width = decoder->frame.width;
            height = decoder->frame.height;

            // xbytes = bytes per row; xstep = column stride per work unit,
            // both derived from the working-buffer pixel format.
            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_W13A:
            case DECODED_FORMAT_RG64:
                xbytes = width*8;
                xstep = 32;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
                xbytes = width*6;
                xstep = 32;
                break;
            default:
                assert(0);
                break;
            }

            // Large chunks when there is no rotation/keystone; single-column
            // chunks (and per-pixel steps for larger angles) when there is.
            if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
                (decoder->cfhddata.channel[1].RotationOffset == 0.0 && decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
                decoder->cfhddata.channel[2].RotationOffset == 0.0 && decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
                decoder->cfhddata.FrameOffsetR == 0.0))
            {
                chunk_size = 8;
            }
            else
            {
                chunk_size = 1;

                if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
                    fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
                    (fabs(decoder->cfhddata.channel[2].RotationOffset) +
                    fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
                {
                    // Angle large enough to need per-pixel vertical sampling.
                    switch(decoder->StereoBufferFormat)
                    {
                    case DECODED_FORMAT_RGB32:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_RGB24:
                        xstep = 3;
                        break;
                    case DECODED_FORMAT_YUYV:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_RG64:
                        xstep = 8;
                        break;
                    case DECODED_FORMAT_WP13:
                    case DECODED_FORMAT_RG48:
                    default:
                        xstep = 6;
                        break;
                    }
                    fine_vertical = 1;
                }
            }

            if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
                (decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
                decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
                decoder->codec.progressive == false)
            {
                // Interlaced 4:2:2 source: process each field separately at
                // double pitch and half height.
                int interlaced_pitch = local_pitch * 2;
                uint8_t *field2_output = local_output + local_pitch;

                // Post a message to the mailbox
                mailbox->local_output = local_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->chunk_size = chunk_size;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

                // Post a message to the mailbox (second field)
                mailbox->local_output = field2_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
            else
            {
                //TODO Lens corect here.
                //call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
                // JOB_TYPE_HORIZONTAL_3D
                //before doing any offset and rotation corrections.

                if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
                    width /= 2;

                // Post a message to the mailbox
                mailbox->local_output = local_output;
                mailbox->local_pitch = local_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
        }

        // Horizontal 3D pass: stereo blend, horizontal/vertical flips, and
        // format conversion into the caller's buffer.
        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        mailbox->local_output = local_output;
        mailbox->local_pitch = local_pitch;
        mailbox->channel_offset = channel_offset;
        memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
        mailbox->chunk_size = 16;
        mailbox->line_max = decoder->frame.height;
        if(decoder->channel_mix_half_res == 1)
            mailbox->line_max *= 2;
        workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
        decoder->doVerticalFilter = 0; // may be set by the workers below
        mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips
        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

        // Optional sharpen pass requested by the horizontal 3D workers.
        if(decoder->doVerticalFilter)
        {
            // Post a message to the mailbox
            mailbox->output = output;
            mailbox->pitch = pitch;
            mailbox->local_output = local_output_double;
            mailbox->local_pitch = local_pitch;
            mailbox->channel_offset = channel_offset;
            memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
            mailbox->chunk_size = 16;
            mailbox->line_max = decoder->frame.height;
            if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
                mailbox->line_max *= 2;
            if(decoder->channel_mix_half_res == 1)
                mailbox->line_max *= 2;
            workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
            mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips
            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
    }
#else
    // NOTE(review): this non-threaded path appears stale -- it references
    // 'emptyFrameMask', whose declaration above is commented out, and the
    // large 'if' condition below seems to be missing a closing parenthesis.
    // Confirm before building with _THREADED disabled.
    {
        int y,width, height;
        uint8_t scratch[4096*16];
        int scratchremain = 4096*16;
        int ymin = 0, ymax;

        width = decoder->frame.width;
        height = decoder->frame.height;
        ymax = height;

        if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
        {
            ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
            ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
        }

        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
            (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[1].FrameKeyStone ||
            decoder->cfhddata.channel[1].VerticalOffset ||
            decoder->cfhddata.channel[1].RotationOffset ||
            decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameKeyStone ||
            decoder->cfhddata.channel[2].VerticalOffset ||
            decoder->cfhddata.channel[2].RotationOffset))
            ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
            (decoder->cfhddata.FrameOffsetY ||
            decoder->cfhddata.FrameOffsetR ||
            decoder->cfhddata.FrameOffsetX ||
            decoder->cfhddata.FrameHScale != 1.0 ||
            decoder->cfhddata.FrameHDynamic != 1.0 ||
            decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
            decoder->cfhddata.channel[2].FrameZoom != 1.0))
        {
            int x,xbytes, xstep;
            uint8_t *base = local_output;
            float voffsetstep;
            float voffset = decoder->cfhddata.channel[1].VerticalOffset;
            float roffset = decoder->cfhddata.channel[1].RotationOffset;
            float voffset1, voffset2;
            float voffsetstep1, voffsetstep2;
            int channel_flip = decoder->cfhddata.channel_flip;
            int aspectx,aspecty;
            float aspectfix;

            GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
            aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);

            if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
            {
                voffset = roffset = 0;
            }
            if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
            {
                channel_flip = 0;
            }
            if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
                voffset += decoder->cfhddata.FrameOffsetY;

            if(decoder->cfhddata.InvertOffset)
            {
                voffset = -voffset;
                roffset = -roffset;
            }

            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
            default:
                xbytes = width*6;
                xstep = 32;
                break;
            }

            //DAN20100923 -- simplied
            //voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
            //voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
            voffset += roffset * aspectfix * 0.5;
            voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);

            if(roffset == 0.0)
                xstep = xbytes;

            voffset1 = voffset2 = voffset;
            voffsetstep1 = voffsetstep2 = voffsetstep;

            // Mirror the offsets when either eye is flipped.
            if(channel_flip & 0xf)
            {
                if(channel_flip & 2)
                {
                    voffset1 = -voffset1;
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 8)
                {
                    voffset2 = -voffset2;
                    voffsetstep2 = -voffsetstep2;
                }
                if(channel_flip & 1)
                {
                    voffset1 += voffsetstep1*(xbytes/xstep);
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 4)
                {
                    voffset2 += voffsetstep2*(xbytes/xstep);
                    voffsetstep2 = -voffsetstep2;
                }
            }

            // Shift each column strip vertically (per-eye), interpolating
            // the offset across the row for rotation.
            for(x=0; x<xbytes; x+=xstep)
            {
                if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
                        xstep, height, local_pitch, -voffset2);
                }
                else
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
                        xstep, height, local_pitch, voffset1);
                }

                if(decoder->channel_decodes == 2)
                {
                    uint8_t *bptr = base + channel_offset;
                    RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch,
                        xstep, height, local_pitch, -voffset2);
                }

                base += xstep;
                voffset1 += voffsetstep1;
                voffset2 += voffsetstep2;
            }
        }

        if(decoder->channel_mix_half_res == 1)
            height *= 2;

        if(ymin)
        {
            memset(local_output, 0, abs(local_pitch)); // zero one line;
        }

        // Rows outside [ymin, ymax) read the zeroed line (pitch 0).
        for(y=0; y<ymin; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
        for(; y<ymax; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
        }
        for(; y<height; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
    }
#endif
}
// Decode a sample from the input bitstream into the output frame buffer.
//
// Responsibilities (in order):
//  - detect image-development-only (baseband) decodes and uncompressed samples,
//  - align the sample buffer when the caller handed us a misaligned pointer,
//  - merge caller-supplied CFHDDATA metadata with bitstream metadata,
//  - select which stereo channel(s) to decode and how to blend them (3D work),
//  - configure the active processing path (orientation/framing/burn-ins/flips/
//    color matrix) and decide whether an intermediate local buffer is needed,
//  - decode one or two channels (optionally using a parallel decoder thread),
//  - convert the local buffer to the requested output format and apply burn-ins.
//
// Returns true on success, false on failure; a more specific reason is left in
// decoder->error.  The external interface is unchanged.
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams, CFHDDATA *cfhddata)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
	int channel_decodes = 1; // 3D Work
	int channel_offset = 0;
	int channel_mask = 0;
	int channel_current = 0;
	//int wavelet_index;
	bool result = true;
	uint8_t *local_output = output;
	uint8_t *local_buffer = NULL;
	int local_pitch = pitch;
	int internal_format = decoder->frame.format;
	int output_format = decoder->frame.output_format;
	bool use_local_buffer = false;
	DECODER *local_decoder = decoder;
	//Frame_Region emptyFrameMask = {0};
	Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
	int orig_width = decoder->frame.width;
	int orig_height = decoder->frame.height;

	decoder->local_output = local_output; // used for NV12 decodes.

	decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
	decoder->image_dev_only = 0;
	if(decoder->flags & (1<<3)) // This is an image development only decode.
	{
		decoder->sample_uncompressed = 1;
		decoder->image_dev_only = 1;
		decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
		decoder->codec.unique_framenumber = 0; //What should this be?
		decoder->frame.white_point = 16; // how to we pass this in?
		decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;

		// Size of the raw (uncompressed) image depends only on the output
		// pixel format; 0x7fffffff masks off the format's flag bit.
		switch(output_format & 0x7fffffff)
		{
			case COLOR_FORMAT_RGB24:
				decoder->uncompressed_size = orig_width * orig_height * 3;
				break;
			case COLOR_FORMAT_RGB32:
				decoder->uncompressed_size = orig_width * orig_height * 4;
				break;
			case COLOR_FORMAT_RG48:
			case COLOR_FORMAT_WP13:
				decoder->uncompressed_size = orig_width * orig_height * 6;
				break;
			default:
				decoder->uncompressed_size = orig_width * orig_height * 6;
				assert(0);
				break;
		}
	}

	decoder->frame.alpha_Companded = 0; // reset this state.

	if(decoder->parallelDecoder)
		decoder->parallelDecoder->sample_uncompressed = 0;

	decoder->error = CODEC_ERROR_OKAY;
	input->error = BITSTREAM_ERROR_OKAY;

	// first time through encoded_format is not initized.
	if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
	{
		// Peek at the sample header (using a private bitstream so the real
		// read position is not disturbed) to learn the encoded format and
		// whether this sample is uncompressed.
		SAMPLE_HEADER header;
		BITSTREAM input2;
		InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);

		memset(&header, 0, sizeof(SAMPLE_HEADER));
		header.find_lowpass_bands = 2; // help finding the uncompressed flag

		if(ParseSampleHeader(&input2, &header))
		{
			decoder->codec.encoded_format = header.encoded_format;
			decoder->sample_uncompressed = header.hdr_uncompressed;
			if(decoder->parallelDecoder)
				decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
		}
	}

	// The bitstream reader requires four-byte alignment; copy a misaligned
	// sample into a cached aligned buffer (grown on demand) before decoding.
	if((uintptr_t)input->lpCurrentBuffer & 0x3)
	{
		if(decoder->aligned_sample_buffer == NULL)
		{
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
			decoder->aligned_sample_buffer =
				(uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
			decoder->aligned_sample_buffer =
				(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
			memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
			decoder->aligned_sample_buffer_size = input->dwBlockLength;
		}
		else
		{
			if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
			{
				memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
			}
			else
			{
				// Existing buffer is too small: reallocate at the new size.
#if _ALLOCATOR
				ALLOCATOR *allocator = decoder->allocator;
				FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
				decoder->aligned_sample_buffer =
					(uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
				MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
				decoder->aligned_sample_buffer =
					(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
				memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
				decoder->aligned_sample_buffer_size = input->dwBlockLength;
			}
		}
		input->lpCurrentBuffer = decoder->aligned_sample_buffer;
		input->lpCurrentWord = decoder->aligned_sample_buffer;
	}

#if 0	// Test for missaligning the image data
	if(((int)input->lpCurrentBuffer&3) == 0)
	{
		int i;
		uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
		int missaligned = 1; //2 or 3
		for(i=input->dwBlockLength-1; i>=0; i--)
			ptr[i+missaligned] = ptr[missaligned];

		input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
		input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
	}
#endif

	//HACK
	// Unfortunately I need color matrix data deep within the codec for RT playback.
	if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
	{
		if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
		{
			//int size = cfhddata->size;
			size_t size = cfhddata->size;
			memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
			if (size > sizeof(CFHDDATA)) {
				// Limit the size to the known structure
				size = sizeof(CFHDDATA);
			}
			memcpy(&decoder->cfhddata, cfhddata, size);
		}
	}
	else
	{
		// No valid metadata supplied: (re)initialize the decoder's CFHDDATA
		// with defaults derived from the encoded input format.
		unsigned short value;

		if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
		{
			memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
			decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
			decoder->cfhddata.size = sizeof(CFHDDATA);

			if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults
			{
				decoder->cfhddata.cfhd_subtype = 2; //RGB
				decoder->cfhddata.num_channels = 3;
			}
			else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
			{
				if(value == COLOR_FORMAT_RG48)
				{
					decoder->cfhddata.cfhd_subtype = 2; //RGB
					decoder->cfhddata.num_channels = 3;
				}
				else if(value == COLOR_FORMAT_RG64)
				{
					decoder->cfhddata.cfhd_subtype = 3; //RGBA
					decoder->cfhddata.num_channels = 4;
				}
				else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
				{
					unsigned int format = BAYER_FORMAT_RED_GRN;
					decoder->cfhddata.cfhd_subtype = 1; //BAYER
					decoder->cfhddata.bayer_format = format; // default to Red-Grn
					decoder->cfhddata.version = CFHDDATA_VERSION;
				}
			}
		}
	}

	OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);

	if(decoder->image_dev_only) // HACK we need to support 3D also.
		decoder->source_channels = 1;
	else
		decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);

	// Determine which stereo channel(s) to decode and the 3D blend mode.
	if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
	{
		//int channels = 0;
		int channel_blend_type = BLEND_NONE;
		int channel_swapped_flags = 0;

		// Packed fields: bits 0-7 = channel mask, 8-15 = blend type,
		// 16-31 = swapped flags.  The override takes precedence.
		if(decoder->cfhddata.MSCTV_Override)
		{
			channel_mask = decoder->cfhddata.MSCTV_Override&0xff;
			channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff);
			channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff);
		}
		else
		{
			channel_mask = decoder->cfhddata.MSChannel_type_value&0xff;
			channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff);
			channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff);
		}

		if(channel_mask != 3)
		{
			channel_blend_type = BLEND_NONE;
			channel_swapped_flags = 0;
		}

		//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
		{
			if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
			{
				if(decoder->cfhddata.FramingFlags & 2) // channel swap
				{
					SkipVideoChannel(decoder, input, 2); // 3D work
				}
			}
			else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
			{
				if(decoder->cfhddata.FramingFlags & 2) // channel swap
				{
					SkipVideoChannel(decoder, input, 1); // 3D work
				}
				else
				{
					//assume second channel decode
					SkipVideoChannel(decoder, input, 2); // 3D work
				}
				channel_current = 1;
				channel_decodes = 1;
				channel_blend_type = BLEND_NONE;
				channel_swapped_flags = 0;
			}
			else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
			{
				channel_current = 1;
				channel_decodes = 1;
				channel_blend_type = BLEND_NONE;
				channel_swapped_flags = 0;
			}
			else if((channel_mask&3) == 3) // A+B 3d work
			{
				channel_decodes = 2;
				decoder->channel_mix_half_res = 0;

				if(channel_blend_type != BLEND_NONE)
				{
					// Blended 3D output decodes both eyes into an 8-bit
					// intermediate (RGB32 if alpha must survive, else RGB24).
					if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
					{
						//if(decoder->frame.format == DECODED_FORMAT_W13A)
						//	{
						//	decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
						//	}
						//else
						//{
						//	decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
						//	}
						decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
						local_pitch = decoder->frame.width * 4;
					}
					else
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
						local_pitch = decoder->frame.width * 3; //RGB24
					}
					/*	if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
						(output_format == DECODED_FORMAT_YUYV ||
						output_format == DECODED_FORMAT_UYVY))
					{
						if(	channel_blend_type == BLEND_FREEVIEW ||
							((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
							channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
							channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
						{
							decoder->frame.resolution = DECODED_RESOLUTION_HALF;
							decoder->channel_mix_half_res = 1;
							decoder->frame.width /= 2;
							decoder->frame.height /= 2;
							local_pitch = (decoder->frame.width) * 3; //RGB24
						}
					} */
				}

				/*	if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
				{
					decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
					local_pitch = decoder->frame.width * 2; //YUY2
					channel_offset = local_pitch * (decoder->frame.height);
					use_local_buffer = true;
				}*/

				/*	DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
				{
					decoder->frame.resolution = DECODED_RESOLUTION_HALF;
					decoder->channel_mix_half_res = 1;
					decoder->frame.width /= 2;
					decoder->frame.height /= 2;
					local_pitch /= 2;
				} */

				if( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
					(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
				{
					if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
					{
						if(decoder->sample_uncompressed)
						{
							decoder->frame.resolution = DECODED_RESOLUTION_HALF;
							decoder->channel_mix_half_res = 1;
							decoder->frame.width /= 2;
							decoder->frame.height /= 2;
							local_pitch /= 2;
						}
						else
						{
							if(decoder->preformatted_3D_type > BLEND_NONE)
							{
								// leave as is.
							}
							else if(FORMAT8BIT(output_format))
							{
								decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
								decoder->frame.width /= 2;
								local_pitch /= 2;
							}
						}
					}
					else
					{
						if(FORMAT8BIT(output_format))
							decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
					}

					//TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
				}

				if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2
				{
					channel_offset = local_pitch * (decoder->frame.height);
				}
				else if(channel_blend_type >= BLEND_ANAGLYPH_RC)
				{
					/*	if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
					{
						//B&W using YUYV
						decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
						local_pitch = decoder->frame.width * 2; //YUY2
					}*/
					channel_offset = local_pitch * (decoder->frame.height);
					use_local_buffer = true;
				}
				else if(channel_blend_type == BLEND_NONE) // double high
				{
					channel_offset = pitch * decoder->frame.height;
				}
				else
				{
					channel_blend_type = BLEND_STACKED_ANAMORPHIC;
					channel_offset = pitch * (decoder->frame.height/2);
				}

				// fields, stacked, etc, only works on full or half res.
				if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
					decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail.
				{
					channel_decodes = 1;
					channel_blend_type = BLEND_NONE;
					channel_swapped_flags = 0;
				}

				if (channel_blend_type != BLEND_NONE &&
					(output_format == DECODED_FORMAT_BYR1 ||
					output_format == DECODED_FORMAT_BYR2 ||
					output_format == DECODED_FORMAT_BYR3 ||
					output_format == DECODED_FORMAT_BYR4 ))
				{
					channel_decodes = 1;
					channel_blend_type = BLEND_NONE;
					channel_swapped_flags = 0;
				}
			}
		}

		decoder->channel_decodes = channel_decodes;
		decoder->channel_blend_type = channel_blend_type;
		decoder->channel_swapped_flags = channel_swapped_flags;
	}
	else
	{
		decoder->channel_decodes = channel_decodes = 1;
		decoder->channel_blend_type = BLEND_NONE;
		decoder->channel_swapped_flags = 0;
	}

	if(cfhddata) // So the P-frames can know the bayerformat
	{
		//int size = cfhddata->size;
		size_t size = cfhddata->size;
		if (size > sizeof(CFHDDATA)) {
			size = sizeof(CFHDDATA);
		}
		memcpy(cfhddata, &decoder->cfhddata, size);
	}

	{
		// Resolve the active processing path from the flags and mask, then
		// decide whether decoding must go through an intermediate buffer.
		bool doOrientation = true;
		bool doFraming = true;
		bool doBurins = true;
		bool doImageflips = true;
		bool doGhostBust = false;
		bool doPrimaries = true;
		int process_path_flags = decoder->cfhddata.process_path_flags;
		int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;

		if(decoder->basic_only)
		{
			doOrientation = false;
			doFraming = false;
			doBurins = false;
			doImageflips = false;
			doPrimaries = false;
		}
		else
		{
			if(decoder->cfhddata.process_path_flags_mask)
			{
				//DAN20101007 --
				if(process_path_flags == 0)
					decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;

				process_path_flags &= decoder->cfhddata.process_path_flags_mask;

				if(process_path_flags_mask & PROCESSING_ACTIVE2)
				{
					if(!(process_path_flags_mask & PROCESSING_ORIENTATION))
						doOrientation = false;
					if(!(process_path_flags_mask & PROCESSING_FRAMING))
						doFraming = false;
					if(!(process_path_flags_mask & PROCESSING_BURNINS))
						doBurins = false;
					if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
						doImageflips = false;
				}
				if(!(process_path_flags_mask & PROCESSING_COLORMATRIX))
					doPrimaries = false;
			}

			if(process_path_flags & PROCESSING_ACTIVE2)
			{
				if(!(process_path_flags & PROCESSING_ORIENTATION))
					doOrientation = false;
				if(!(process_path_flags & PROCESSING_FRAMING))
					doFraming = false;
				if(!(process_path_flags & PROCESSING_BURNINS))
					doBurins = false;
				if(!(process_path_flags & PROCESSING_IMAGEFLIPS))
					doImageflips = false;
				if(!(process_path_flags & PROCESSING_COLORMATRIX))
					doPrimaries = false;
			}
		}

		if(doOrientation)
			process_path_flags |= PROCESSING_ORIENTATION;
		if(doFraming)
			process_path_flags |= PROCESSING_FRAMING;
		if(doBurins)
			process_path_flags |= PROCESSING_BURNINS;
		if(doImageflips)
			process_path_flags |= PROCESSING_IMAGEFLIPS;
		if(doPrimaries)
			process_path_flags |= PROCESSING_COLORMATRIX;

		if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
		{
			if(decoder->ghost_bust_left || decoder->ghost_bust_right)
			{
				doGhostBust = true;
			}
		}

		decoder->cfhddata.process_path_flags = process_path_flags;

		// Any active geometry/color correction, 3D presentation mode, odd
		// frame width, or uncompressed sample forces decoding into a WP13
		// (or W13A) intermediate buffer before the final format conversion.
		if((!decoder->basic_only &&
			(doOrientation && (	decoder->cfhddata.channel[0].FloatingWindowMaskL ||
			decoder->cfhddata.channel[0].FloatingWindowMaskR ||
			decoder->cfhddata.channel[0].FrameKeyStone ||
			decoder->cfhddata.channel[0].FrameTilt ||
			decoder->cfhddata.channel[0].HorizontalOffset ||
			decoder->cfhddata.channel[0].VerticalOffset ||
			decoder->cfhddata.channel[0].RotationOffset ||
			decoder->cfhddata.channel[1].FloatingWindowMaskL ||
			decoder->cfhddata.channel[1].FloatingWindowMaskR ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].FrameTilt ||
			decoder->cfhddata.channel[1].HorizontalOffset ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FloatingWindowMaskL ||
			decoder->cfhddata.channel[2].FloatingWindowMaskR ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].FrameTilt ||
			decoder->cfhddata.channel[2].HorizontalOffset ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
			||
			(doPrimaries && (	decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
			decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
			decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
			||
			(doFraming && (	decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
			decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
			decoder->cfhddata.channel[2].user_vignette_start != 0.0))
			||
			(doFraming && (	memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
			decoder->cfhddata.FrameOffsetX ||
			decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0))
			||
			(doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
			||
			(doImageflips && decoder->cfhddata.channel_flip)
			||
			(decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) ||
			(decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) ||
			(decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
			( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
			decoder->sample_uncompressed) ||
			(decoder->cfhddata.doMesh)
			)
		{
			if(	output_format == DECODED_FORMAT_BYR1 ||
				output_format == DECODED_FORMAT_BYR2 ||
				output_format == DECODED_FORMAT_BYR3 ||
				output_format == DECODED_FORMAT_BYR4 )
			{
				// no manipulation should be applied
			}
			else
			{
				use_local_buffer = true;
				local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48
				if(decoder->image_dev_only)
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
				}
				else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
					local_pitch = ((decoder->frame.width+7)/8)*8 * 8;
				}
				else
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
				}

				if(	decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
					decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				{
					local_pitch *= 2; // need horizontal room to make 3D side by side frame
				}

				/*
				if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
				{
					// preserve HDR
					decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
					if(output_format == DECODED_FORMAT_W13A)
						local_pitch = decoder->frame.width * 8;
				}
				else
				{
					if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
						local_pitch = decoder->frame.width * 8;
					}
					else
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
					}
				}*/
				channel_offset = local_pitch * (decoder->frame.height);
			}
		}
	}

	// Build the 16K-entry curve-to-linear lookup table used to restore BYR4
	// output to linear light (only when no encode curve preset is active).
	if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
	{
		if(decoder->BYR4LinearRestore == NULL)
		{
			int j,val;
			int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
			//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
			float encode_curvebase;

			if(encode_curve_type) //1 or 2
			{
				if(encode_curve_type & CURVE_TYPE_EXTENDED)
					encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
				else
					encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
			}
			else
			{
				encode_curve_type = CURVE_TYPE_LOG;
				encode_curvebase = 90.0;
			}

#if _ALLOCATOR
			decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16);
#else
			decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16);
#endif
			for(j=0; j<16384; j++) //0 to 1
			{
				switch(encode_curve_type & CURVE_TYPE_MASK)
				{
					case CURVE_TYPE_LOG:
						val = (int)(CURVE_LOG2LIN((float)j/16384.0f,
							(float)encode_curvebase) * 65535.0f);
						break;
					case CURVE_TYPE_GAMMA:
						val = (int)(CURVE_GAM2LIN((float)j/16384.0f,
							(float)encode_curvebase) * 65535.0f);
						break;
					case CURVE_TYPE_CINEON:
						val = (int)(CURVE_CINEON2LIN((float)j/16384.0f,
							(float)encode_curvebase) * 65535.0f);
						break;
					case CURVE_TYPE_CINE985:
						val = (int)(CURVE_CINE9852LIN((float)j/16384.0f,
							(float)encode_curvebase) * 65535.0f);
						break;
					case CURVE_TYPE_PARA:
						val = (int)(CURVE_PARA2LIN((float)j/16384.0f,
							(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
						break;
					case CURVE_TYPE_CSTYLE:
						val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f,
							(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
						break;
					case CURVE_TYPE_SLOG:
						val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f);
						break;
					case CURVE_TYPE_LOGC:
						val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f);
						break;
					case CURVE_TYPE_LINEAR:
					default:
						val = j;
						break;
				}

				if(val < 0) val = 0;
				if(val > 65535) val = 65535;

				decoder->BYR4LinearRestore[j] = val;
			}
		}
	}

	//DAN20120319 - removed
	/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
	{
		local_pitch *= 2;
		channel_offset = local_pitch * (decoder->frame.height*2);
	}*/

	if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
	{
		int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;

		if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
			stereoframesize = channel_offset;
		if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
			stereoframesize = channel_offset * 2;
		if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
			stereoframesize = channel_offset * 2;

		if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize)
		{
#if _ALLOCATOR
			if(decoder->StereoBuffer)
			{
				FreeAligned(decoder->allocator, decoder->StereoBuffer);
				decoder->StereoBuffer = NULL;
			}
			decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#else
			if(decoder->StereoBuffer)
			{
				MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
				decoder->StereoBuffer = NULL;
			}
			decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#endif
			assert(decoder->StereoBuffer != NULL);
			if (! (decoder->StereoBuffer != NULL)) {
				// BUGFIX: this function returns bool; the previous code
				// returned CODEC_ERROR_MEMORY_ALLOC here, a non-zero value
				// that callers would interpret as success.  Record the error
				// in the decoder and report failure, matching the other
				// error paths in this routine.
				decoder->error = CODEC_ERROR_MEMORY_ALLOC;
				STOP(tk_decompress);
				return false;
			}
			decoder->StereoBufferSize = stereoframesize;
		}
		decoder->StereoBufferFormat = internal_format;

		local_buffer = (uint8_t *)decoder->StereoBuffer;
		local_output = local_buffer;
	}

	DecodeEntropyInit(decoder);

	//swapped -- Maybe useful for double height decodes.
	/*	if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
	{
		local_output += channel_offset;
		channel_offset = -channel_offset;
	}*/

	decoder->use_local_buffer = use_local_buffer ? 1 : 0;

	// Two-eye decode with no parallel decoder yet: create one sized to the
	// encoded (pre-scaling) dimensions so the second eye can run on a thread.
	if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
	{
		int encoded_width = decoder->frame.width;
		int encoded_height = decoder->frame.height;

		if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 2;
			encoded_height *= 2;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 4;
			encoded_height *= 4;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 2;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_height *= 2;
		}

#if _ALLOCATOR
		decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
		if(decoder->parallelDecoder)
		{
			memset(decoder->parallelDecoder, 0, sizeof(DECODER));
			DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height,
				internal_format, DECODED_RESOLUTION_FULL, NULL);
		}
#else
		decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
		if(decoder->parallelDecoder)
		{
			memset(decoder->parallelDecoder, 0, sizeof(DECODER));
			decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
			DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height,
				internal_format, DECODED_RESOLUTION_FULL, NULL);
		}
#endif
	}

	// Using the parallel decoder?
	if (decoder->parallelDecoder)
	{
		// Initialize the parallel decoder with parameters from the regular decoder
		memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
		memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16);
		DecodeEntropyInit(decoder->parallelDecoder);
		DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize);

		decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
		decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
		decoder->parallelDecoder->flags = decoder->flags;
		decoder->parallelDecoder->frame = decoder->frame;
		decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
		decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;

		if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->parallelDecoder->decoder_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
							1, //
							ParallelThreadProc,
							decoder->parallelDecoder);
		}
	}

	if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
	{
		// Second stream as a thread.
		BITSTREAM second_input = *input;

		if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
		{
			BITSTREAM leftEye_input = *input;
			SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
			*input = leftEye_input;

			SkipVideoChannel(decoder, &second_input, 1); // 3D work
		}
		else
			SkipVideoChannel(decoder, &second_input, 2); // 3D work

		decoder->channel_current = 0;
		decoder->parallelDecoder->channel_current = 1;

		// Instead of reading the metadata databases again, use the ones in the main decoder
		OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);

		// DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.)
		// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed);
		//OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed);

		// Hack, this gets lost
		decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;

#if (_THREADED && _GRAPHICS)
		if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
		{
			if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
			{
				DrawStartThreaded(decoder);
			}
		}
#endif

		// Post a message to the mailbox
		decoder->parallelDecoder->decoder_thread.input = &second_input;

		if(use_local_buffer == false &&
			(decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
		{
			decoder->parallelDecoder->decoder_thread.output = local_output;
			local_output += channel_offset;
		}
		else
		{
			decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
		}
		decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
		decoder->parallelDecoder->decoder_thread.colorparams = colorparams;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);

		// do the first channel
		{
			TAGVALUE segment;
			int sample_type;

#if _THREADED
			decoder->entropy_worker_new.next_queue_num = 0;
			decoder->entropy_worker_new.threads_used = 0;
#endif

			// Get the type of sample
			segment = GetTagValue(input);
			assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
			if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
				decoder->error = CODEC_ERROR_BITSTREAM;
				STOP(tk_decompress);
				return false;
			}
			sample_type = segment.tuple.value;

			switch (sample_type)
			{
				case SAMPLE_TYPE_GROUP:		// Group of frames (decode the first frame)
					result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
					break;
				case SAMPLE_TYPE_FRAME:		// Decode the second or later frame in a group
					result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
					break;
				case SAMPLE_TYPE_IFRAME:	// Decode a sample that represents an isolated frame
					result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
					break;
				case SAMPLE_TYPE_SEQUENCE_HEADER:
					// The video sequence header is ignored
					result = true;
					break;
				default:
					// Need to fill the output frame
					//error = CODEC_ERROR_SAMPLE_TYPE;
					result = false;
			}
		}

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
	}
	else
	{
		// Serial path: decode each requested channel in turn, switching to
		// the parallel decoder object (if any) for the second channel.
		while(channel_decodes > 0)
		{
			TAGVALUE segment;
			int sample_type;

			local_decoder->channel_current = channel_current++;

			//OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed);

#if (_THREADED && _GRAPHICS)
			if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
			{
				if(decoder->cfhddata.BurninFlags & 3) //overlays / tools
				{
					DrawStartThreaded(decoder);
				}
			}
#endif

#if _THREADED
			local_decoder->entropy_worker_new.next_queue_num = 0;
			local_decoder->entropy_worker_new.threads_used = 0;
#endif

			if(decoder->image_dev_only)
			{
				result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
			}
			else
			{
				// Get the type of sample
				segment = GetTagValue(input);
				assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
				if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
					local_decoder->error = CODEC_ERROR_BITSTREAM;
					STOP(tk_decompress);
					return false;
				}
				sample_type = segment.tuple.value;

				switch (sample_type)
				{
					case SAMPLE_TYPE_GROUP:		// Group of frames (decode the first frame)
						result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
						break;
					case SAMPLE_TYPE_FRAME:		// Decode the second or later frame in a group
						result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
						break;
					case SAMPLE_TYPE_IFRAME:	// Decode a sample that represents an isolated frame
						result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
						break;
					case SAMPLE_TYPE_SEQUENCE_HEADER:
						// The video sequence header is ignored
						result = true;
						break;
					default:
						// Need to fill the output frame
						//error = CODEC_ERROR_SAMPLE_TYPE;
						result = false;
				}
			}

			if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
			{
				channel_decodes = 0;
			}
			else
			{
				channel_decodes--;
				local_output += channel_offset;

				if(decoder->parallelDecoder)
				{
					local_decoder = decoder->parallelDecoder;
				}
			}
		}
	}

	if(use_local_buffer && output)
	{
		decoder->use_local_buffer = 0;
#if WARPSTUFF
		WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
		MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
#endif
		ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
	}
	else
	{
#if WARPSTUFF
		WarpFrame(decoder, output, pitch, output_format);
		MaskFrame(decoder, output, pitch, output_format);
#endif
	}

	// Undo the temporary resolution/dimension changes made above so the
	// decoder state reflects the full-size frame for the next sample.
	if(decoder->channel_mix_half_res) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
		decoder->frame.width *= 2;
		decoder->frame.height *= 2;
		decoder->channel_mix_half_res = 0;
	}
	if(	decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
		decoder->frame.width *= 2;
	}
	if(	decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
	}

#if _GRAPHICS
	if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
	{
		PaintFrame(decoder, output, pitch, output_format);
	}
#endif

	STOP(tk_decompress);

	// Return indication of whether decoding succeeded or failed
	return result;
}
// Decode a sample that encoded a group of frames (return the first frame)
//
// Drives the tag/value parser over one encoded group (GOP) sample.  Each
// segment read from the bitstream is passed to UpdateCodecState(), which
// decodes headers and subband data as a side effect.  When enough of a
// channel has been decoded that the remainder can be skipped, the stream is
// advanced directly to the next channel.  On success the first frame of the
// group is reconstructed into the caller's output buffer; on failure the
// output frame is cleared to zero and false is returned.
//
// Parameters:
//   decoder     - decoder state (transforms, codec state, error status)
//   input       - bitstream positioned at the start of the sample
//   output      - destination frame buffer
//   pitch       - output row pitch in bytes
//   colorparams - color conversion parameters (not used directly here;
//                 kept for the common decode-entry signature)
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	int resolution = decoder->frame.resolution;
	bool result = true;

	// Tables mapping each encoded subband number to the wavelet that stores
	// its coefficients and to the band within that wavelet (group transform
	// layout; seventeen subbands)
	static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
	static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
	int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

#if (0 && DEBUG)
	// Force quarter resolution decoding for debug that feature
	resolution = DECODED_RESOLUTION_QUARTER;
#endif

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoding sample group\n");
	}
#endif

	START(tk_decoding);

	// Initialize the codec state
	InitCodecState(&decoder->codec);

	// Allocate the transform data structure for the group of frames
	AllocDecoderGroup(decoder);

	// Initialize the tables for decoding the wavelet transforms
	InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

	// Clear the flags in the wavelet transforms
	ClearTransformFlags(decoder);

	// Process the tag value pairs until an encoded subband is found
	for (;;)
	{
		TAGVALUE segment;

		// Read the next tag value pair from the bitstream
		//segment = GetTagValue(input);
		segment = GetSegment(input);
		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		// Update the codec state with the information in the tag value pair
		{
			TAGWORD tag = segment.tuple.tag;
			TAGWORD value = segment.tuple.value;

			// Use the tag value pair to update the codec state
			// (subband payloads are decoded inside this call as well)
			error = UpdateCodecState(decoder, input, codec, tag, value);
			assert(error == CODEC_ERROR_OKAY);
			if (error != CODEC_ERROR_OKAY)
			{
				decoder->error = error;
				result = false;
				break;
				//NOTE: Consider moving the error code into the codec state
			}
		}

		// Check whether the group has been decoded
		if (codec->sample_done) break;

		// Skip the rest of the current channel?
		if (CanSkipChannel(decoder, resolution))
		{
			// The fourth channel is not needed for YUV 4:2:2 output formats
			if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
				codec->num_channels = 3;
				goto decoding_complete;
			}
			else
			if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
			{
				// Only the lowpass band is needed; no inverse transform required
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
			}
			else
			{
				// Compute the bitstream position after the current channel
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Get the temporal wavelet
				int temporal_index = 2;
				TRANSFORM *transform = decoder->transform[channel];
				IMAGE *wavelet = transform->wavelet[temporal_index];

#if (0 && DEBUG)
				if (IsBandValid(wavelet, HIGHPASS_BAND))
				{
					int static count = 0;
					if (count < 20) {
						char label[_MAX_PATH];
						sprintf(label, "Temporal-decode-%d-", count);
						DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
					}
					count++;
				}
#endif

#if _THREADED_DECODER
				// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
				//if (DecodedBandsValid(wavelet, temporal_index))
				if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
				// Have all bands in the temporal wavelet been decoded?
				//if (wavelet && BANDS_ALL_VALID(wavelet))
				if (AllBandsValid(wavelet))
#endif
				{
					//PIXEL *buffer = (PIXEL *)decoder->buffer;
					//size_t buffer_size = decoder->buffer_size;
					int precision = codec->precision;

#if (0 && DEBUG)
					if (logfile) {
						fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
					}
#endif

#if _THREADED_DECODER
					// Add the temporal inverse transform to the processing queue
					if(decoder->entropy_worker_new.pool.thread_count)
					{
						ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
											   precision, &decoder->scratch, 1);
						QueueThreadedTransform(decoder, channel, temporal_index);
					}
					else
#endif
					{
						// Reconstruct the lowpass bands in the first level wavelets
						//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
						ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
											   precision, &decoder->scratch, 0 );
					}

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;

					// Note that the subband flags are also reset when the channel header is decoded
				}
				// Was the wavelet created?
				else if (wavelet == NULL)
				{
					// The temporal wavelet is not created during quarter resolution decoding

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;
				}
				//TODO: Improve quarter resolution decoding so that the wavelet is created?
			}
		}
	}

decoding_complete:
	STOP(tk_decoding);

#if (0 && DEBUG)
	if (logfile)
	{
		char label[_MAX_PATH];
		int channel;

		for (channel = 0; channel < codec->num_channels; channel++)
		{
			TRANSFORM *transform = decoder->transform[channel];
			IMAGE *wavelet = transform->wavelet[2];
			uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
			int height = wavelet->height;
			int pitch = wavelet->pitch;
			int size = height * pitch;
			int band;

			for (band = 0; band < wavelet->num_bands; band++)
			{
				sprintf(label, "Temporal channel: %d, band: %d", channel, band);
				DumpBandStatistics(label, wavelet, band, logfile);
#if 0
				sprintf(label, "Temporal-channel%d-band%d-", channel, band);
				DumpBandPGM(label, wavelet, band, NULL);
#endif
			}

			assert(size > 0);
			ZeroMemory(data, size);
		}
	}
#endif

	if (result)
	{
		// Two frames have been decoded
		decoder->gop_length = 2;
		decoder->frame_count += 2;

#if (1 && DEBUG)
		if (logfile) {
			fprintf(logfile,
					"DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
					decoder, decoder->gop_length);
		}
#endif

		// Return the first frame in the group
		if (!decoder->no_output)
		{
#if 0
			// Decoding to quarter frame resolution at full frame rate?
			if (resolution == DECODED_RESOLUTION_QUARTER)
			{
				int num_channels = codec->num_channels;
				FRAME_INFO *info = &decoder->frame;
				char *buffer = decoder->buffer;
				size_t buffer_size = decoder->buffer_size;
				uint8_t *frame1 = output;
				uint8_t *frame2 = decoder->output2;
				assert(frame2 != NULL);

				// Reconstruct two frames at quarter resolution
				ReconstructQuarterFrame(decoder, num_channels,
										frame1, frame2, pitch,
										info, buffer, buffer_size);
			}
			else
#endif
			// Finish computing the output frame
			ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
		}

		if (decoder->error != CODEC_ERROR_OKAY) {
			result = false;
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0)
		{
			// Zero the frame so stale data is not displayed
			memset(output, 0, frame_size);
		}
	}

	return result;
}
// Decode a sample that represents the second frame in a group
//
// Reads tag/value segments from the bitstream until the frame index tag that
// ends the frame header is found, letting UpdateCodecState() apply each pair.
// The requested frame is then reconstructed from the wavelet pyramid that was
// decoded along with the preceding group sample.  On failure the output frame
// is cleared to zero and false is returned.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	bool result = true;

	START(tk_decoding);

	// Scan the frame sample header one tagged segment at a time
	while (result)
	{
		TAGWORD tag;
		TAGWORD value;

		// Fetch the next tagged segment from the bitstream
		//TAGVALUE segment = GetTagValue(input);
		TAGVALUE segment = GetSegment(input);
		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Apply this tag value pair to the codec state
		error = UpdateCodecState(decoder, input, codec, tag, value);
		assert(error == CODEC_ERROR_OKAY);
		if (error != CODEC_ERROR_OKAY) {
			decoder->error = error;
			result = false;
			break;
		}

		// The frame index tag marks the end of the frame header
		if (tag == CODEC_TAG_FRAME_INDEX) break;
	}

	STOP(tk_decoding);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile,
				"DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
				decoder, decoder->gop_length);
	}
#endif

	if (result)
	{
		// Display the second frame when the group holds two frames,
		// otherwise fall back to the first (and only) frame
		// assert(decoder->gop_length >= 2);
		int frame_index = (decoder->gop_length >= 2) ? 1 : 0;

		if (decoder->gop_length > 0)
		{
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Frame type that is not handled
		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0)
		{
			// Zero the frame
			memset(output, 0, frame_size);
		}
	}

	return result;
}
// Decode a sample that encodes an intra frame
//
// Same structure as DecodeSampleGroup() but for a single intra-coded frame:
// the subband tables describe a three-level spatial pyramid (ten subbands)
// instead of the group transform.  Tag/value segments are parsed until the
// sample is complete, skipping the remainder of a channel once enough of it
// has been decoded.  On success the single decoded frame is reconstructed
// into the output buffer; on failure the frame is cleared to zero.
//
// Parameters:
//   decoder     - decoder state (transforms, codec state, error status)
//   input       - bitstream positioned at the start of the sample
//   output      - destination frame buffer
//   pitch       - output row pitch in bytes
//   colorparams - color conversion parameters (not used directly here)
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	int resolution = decoder->frame.resolution;
	bool result = true;

	// Subband-to-wavelet mapping for the intra (spatial) transform.
	// NOTE(review): the band index table has more entries than the wavelet
	// index table; only the first num_subbands entries are used.
	static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
	static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
	int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

	START(tk_decoding);

	if(decoder->image_dev_only) goto decoding_completeI;

	// Initialize the codec state
	InitCodecState(&decoder->codec);

	// Allocate the transform data structure for the group of frames
	AllocDecoderGroup(decoder);

	// Initialize the tables for decoding the wavelet transforms
	InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

	// Clear the flags in the wavelet transforms
	ClearTransformFlags(decoder);

	//Force V210 output for debugging ***DEBUG***
	//decoder->frame.format = DECODED_FORMAT_V210;

	// Process the tag value pairs until an encoded subband is found
	for (;;)
	{
		TAGVALUE segment;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(input);
		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		{
			TAGWORD tag = segment.tuple.tag;
			TAGWORD value = segment.tuple.value;

			// Use the tag value pair to update the codec state
			error = UpdateCodecState(decoder, input, codec, tag, value);
			assert(error == CODEC_ERROR_OKAY);
			if (error != CODEC_ERROR_OKAY) {
				decoder->error = error;
				result = false;
				break;
				//NOTE: Consider moving the error code into the codec state
			}
		}

		// Check whether the group has been decoded
		if (codec->sample_done) {
			break;
		}

		// Skip the rest of the current channel?
		if (CanSkipChannel(decoder, resolution))
		{
			// The fourth channel is not needed for YUV 4:2:2 output formats
			if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
				codec->num_channels = 3;
				goto decoding_completeI;
			}
			else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
			{
				// Only the lowpass band is needed; no inverse transform required
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
			}
			else
			{
				// Compute the bitstream position after the current channel
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Get the highest wavelet in the pyramid
				int wavelet_index = 2;
				TRANSFORM *transform = decoder->transform[channel];
				IMAGE *wavelet = transform->wavelet[wavelet_index];

#if _THREADED_DECODER
				// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
				//if (DecodedBandsValid(wavelet, temporal_index))
				if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
				// Have all bands in the wavelet been decoded?
				if (AllBandsValid(wavelet))
#endif
				{
					//PIXEL *buffer = (PIXEL *)decoder->buffer;
					//size_t buffer_size = decoder->buffer_size;
					int precision = codec->precision;

#if (0 && DEBUG)
					if (logfile) {
						char label[_MAX_PATH];
						int band;

						sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
						DumpImageStatistics(label, wavelet, logfile);
#if 1
						for (band = 1; band < wavelet->num_bands; band++)
						{
							sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
							DumpBandStatistics(label, wavelet, band, logfile);
						}
#endif
					}
#endif

					// Fixed: was "#if (0 & DEBUG)" (bitwise AND); every other
					// debug guard in this file uses the logical form
#if (0 && DEBUG)
					if (logfile) {
						fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
					}
#endif

#if _THREADED_DECODER
					// Add the inverse spatial transform to the processing queue
					if(decoder->entropy_worker_new.pool.thread_count)
					{
						ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
											   precision, &decoder->scratch, 1);
						QueueThreadedTransform(decoder, channel, wavelet_index);
					}
					else
#endif
					{
						// Reconstruct the lowpass bands in the first level wavelets
						//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
						ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
											   precision, &decoder->scratch, 0);
					}

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;

					// Note that the subband flags are also reset when the channel header is decoded
				}
				// Was the wavelet created?
				//else if (wavelet == NULL)
				else
				{
					// The wavelet may not have been created during quarter resolution decoding
					// The wavelet should have been created if all bands are valid
					assert(wavelet != NULL);

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;
				}
				//TODO: Improve quarter resolution decoding so that the wavelet is created?
			}
		}
	}

decoding_completeI:
	STOP(tk_decoding);

	if (result)
	{
		// One frame has been decoded
		decoder->gop_length = 1;
		decoder->frame_count += 1;

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile,
					"DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
					decoder, decoder->gop_length);
		}
#endif

		// Return the first frame (the only frame that was decoded)
		if (!decoder->no_output)
		{
			int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;

			if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
			{
				//CODEC_STATE *codec = &decoder->codec;
				TRANSFORM **transform_array = decoder->transform;
				int num_channels = codec->num_channels;
				//int progressive = codec->progressive;
				FRAME_INFO *info = &decoder->frame;
				int precision = codec->precision;

#if _THREADED_DECODER
				// Wait until the transform thread has finished all pending transforms
				WaitForTransformThread(decoder);
#endif
				ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
			}
			else
			{
				// Finish computing the output frame
				ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
			}
		}

		if (decoder->error != CODEC_ERROR_OKAY) {
			result = false;
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0)
		{
			// Zero the frame so stale data is not displayed
			memset(output, 0, frame_size);
		}
	}

	return result;
}
// Decode a sample channel header
//
// Called when a channel header tag is found in the bitstream.  Decodes the
// remainder of the header, advances the codec state to the next channel, and
// seeds that channel's transform from the one just completed.  Returns false
// (with decoder->error set) if the header could not be decoded.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	CODEC_STATE *codec = &decoder->codec;
	int current_channel = codec->channel;
	int next_channel = current_channel + 1;
	TRANSFORM *current_transform = decoder->transform[current_channel];
	CHANNEL_HEADER header;

	// Decode the rest of the channel header
	error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
	assert(error == CODEC_ERROR_OKAY);
	decoder->error = error;
	if (error != CODEC_ERROR_OKAY) {
		return false;
	}

	// The decoder is not able to skip channels
	assert(header.channel == next_channel);

	// Initialize the next channel's transform using the previous one
	InitChannelTransform(decoder->transform[next_channel], current_transform);

	// Advance the codec state to the next channel and reset the
	// per-channel subband bookkeeping
	codec->channel = next_channel;
	codec->band.subband = 0;
	codec->decoded_subband_flags = 0;

	return true;
}
// Decode the coefficients in a subband
//
// Dispatches on the subband number decoded from the bitstream:
//   subband == 0   - lowpass band of the top wavelet in the pyramid
//   subband == 255 - empty (untransmitted) band; must be the highpass band
//                    of the temporal wavelet
//   otherwise      - a highpass band, located through the decoder's
//                    subband-to-wavelet lookup table
// The destination wavelet is allocated (or reallocated) as needed.  Once all
// bands of the wavelet are available (started, in the threaded decoder), the
// inverse wavelet transform is applied - or queued on the worker pool - to
// reconstruct the lowpass band of the next lower wavelet.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int *subband_wavelet_index = decoder->subband_wavelet_index;

	// Used for quarter resolution and threaded decoding
	int transform_type = transform->type;

	// Wavelet parameters
	int width;
	int height;
	int level;
	int type;
	int band;
	int threading = 1;

	// Wavelet containing the band to decode
	int index;
	IMAGE *wavelet = NULL;

	bool result;

	// Subbands 7-10 of the fieldplus transform are decoded without threading
	// (NOTE(review): presumably an ordering constraint - confirm)
	if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
		threading = 0;

	// Update the transform data structure from the codec state
	UpdateCodecTransform(transform, codec);

	// Is this an empty band?
	if (subband == 255)
	{
		// Decode an empty band

		// This wavelet is the temporal wavelet
		index = 2;
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;

		// The empty band should be the highpass band in a temporal wavelet
		assert(type == WAVELET_TYPE_TEMPORAL && band == 1);

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		// Set the wavelet parameters
		wavelet->pixel_type[band] = PIXEL_TYPE_16S;
		wavelet->num_bands = 2;

		result = DecodeSampleEmptyBand(decoder, input, wavelet, band);

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = 11;
	}
	// Is this a highpass band?
	else if (subband > 0)
	{
		// Decode a highpass band

		// Get the wavelet that contains this subband
		index = subband_wavelet_index[subband];
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandStartedFlags(decoder, wavelet, band);
		}

		// Reset the default encoding method
		codec->band.encoding = BAND_ENCODING_RUNLENGTHS;

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}
	else
	{
		// Decode a lowpass band

		// Get the wavelet that contains this subband
		index = subband_wavelet_index[0];
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->lowpass.width;
		height = codec->lowpass.height;
		level = codec->lowpass.level;
		type = codec->first_wavelet;
		//band = codec->band.number;
		band = 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		// The lowpass data is always stored in wavelet band zero
		assert(band == 0);

		// The lowpass band must be subband zero
		assert(subband == 0);

		result = DecodeSampleLowPassBand(decoder, input, wavelet);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}

	// Was the subband successfully decoded?
	if (result)
	{
		// The transform will set the band valid flag if this is the temporal wavelet
		//if (index != 2)

		// Record that this subband has been decoded successfully
		if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
			codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
					subband, index, channel);
		}
#endif
	}

#if _THREADED_DECODER

	// Ready to queue a threaded transform to invert this wavelet?
	if (BANDS_ALL_STARTED(wavelet))
	{
		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;

			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}

			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}

			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}

		if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
		{
			if(decoder->entropy_worker_new.pool.thread_count && threading)
			{
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
									   codec->precision, &decoder->scratch, 1);

				// Add the inverse wavelet transform to the processing queue
				QueueThreadedTransform(decoder, codec->channel, index);
			}
			else
			{
				// Apply the inverse wavelet transform to reconstruct the lower level wavelet
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
									   codec->precision, &decoder->scratch, 0);
			}
		}
	}

#else

	// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
	if (BANDS_ALL_VALID(wavelet))
	{
		int channel = codec->channel;
		//PIXEL *buffer = (PIXEL *)decoder->buffer;
		//size_t buffer_size = decoder->buffer_size;
		int precision = codec->precision;

#if (0 && DEBUG)
		if (logfile) {
			char label[_MAX_PATH];
			int band;

			sprintf(label, "Channel: %d, index: %d", channel, index);
			DumpImageStatistics(label, wavelet, logfile);
#if 1
			for (band = 1; band < wavelet->num_bands; band++)
			{
				sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
				DumpBandStatistics(label, wavelet, band, logfile);
			}
#endif
		}
#endif

		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;

			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}

			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}

			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}

		// Apply the inverse wavelet transform to reconstruct the lower level wavelet
		ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
	}

#endif

	return result;
}
// Decode the coefficients in a lowpass band
//
// Reads the lowpass (DC) band of the top-level wavelet for the current
// channel.  Three decoding paths:
//   1. 16-bit coefficients read directly from the word-aligned bitstream
//      buffer (two coefficients packed per 32-bit big-endian word), with an
//      optional "solid color" header that fills the band with one value;
//   2. 8-bit coefficients read byte by byte with inverse quantization;
//   3. a generic GetBits() path for any other coefficient size.
// A small per-channel offset is added in some precision/format combinations
// to compensate for rounding introduced elsewhere in the pipeline (the magic
// values carry the original author's change annotations).  The quantization
// factor is recorded in wavelet->scale[0] rather than applied to the 16-bit
// samples here.
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	bool result = true;

	int lowpass_width;		// Lowpass band dimensions
	int lowpass_height;
	int lowpass_pitch;
	PIXEL *pLowPassRow;		// Pointer into the lowpass band

	//int wavelet_width;	// Dimensions of the wavelet image
	//int wavelet_height;

	int bits_per_pixel;
	int quantization;
	int offset;
	//int pixel_divisor = (1 << (2 * codec->lowpass.level));

	int row, column;
	int32_t solid_color = -1;	// -1 means "not a solid color band"

	const int gain = 128;
	const int colorshift = 0;
	// int channelgain[4];

	//int waterrow=19, watercol=214;
	//int cspace = decoder->frame.colorspace;

	// Lowpass image dimensions may be smaller than the wavelet dimensions
	// because the encoder may have transmitted an image without the border
	lowpass_width = codec->lowpass.width;
	lowpass_height = codec->lowpass.height;
	lowpass_pitch = wavelet->pitch/sizeof(PIXEL);

	pLowPassRow = wavelet->band[0];

	// Get the parameters for quantization performed by the encoder
	quantization = codec->lowpass.quantization;
	offset = codec->lowpass.pixel_offset;
	bits_per_pixel = codec->lowpass.bits_per_pixel;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decode lowpass subband\n");
	}
#endif

	// Fast path: 16-bit samples, bitstream byte-aligned, even width
	if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
	{
		int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
		//int signval = 0;
		//int channel3stats = 0;
		int channeloffset = 0;

		// Choose the per-channel rounding compensation for this precision
		// and output format (empirically tuned values - see annotations)
		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_V210:
				channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
				break;
			default:
				channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
			}

			if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
				channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
		}
		else if(decoder->codec.precision == 12)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
				channeloffset = 8; //DAN200906010
				break;

			// 16-bit precision:
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				channeloffset = 0;
				break;

			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
				channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
				break;

			default:
				channeloffset = 0;
				break;
			}
		}

		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
			channeloffset = 0;

#define DUMPLL 0
#if (_DEBUG && DUMPLL)
		FILE *fp;
		if(channel == 0)
		{
			static int inc = 1;
			char name[256];
			sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
			fp = fopen(name,"w");
			fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
		}
#endif

#if LOSSLESS
		channeloffset = 0; //LOSSLESS
#endif

		// Check for a solid color header: an all-ones marker word followed by
		// the fill value and the band dimensions
		//if(lpCurrentLong[0] == 0xffffffff)
		if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
		{
			if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
			{
				if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
				{
					solid_color = SwapInt32BtoN(lpCurrentLong[1]);
					solid_color |= (solid_color<<16);
					lpCurrentLong += 4;
				}
			}
		}

		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			int pixels;

			// Start at the first column
			column = 0;

			// Process the rest of the row
			{
				for (; column < lowpass_width; column++)
				{
					int pixel_value;
					//int i;

					// Perform inverse quantization
					if(column & 1)
					{
						// Odd column: use the sign-extended low half saved
						// from the previous iteration
						pixel_value = pixels;
					}
					else
					{
						// Even column: fetch the next big-endian word; the
						// high 16 bits are this pixel, the low 16 bits are
						// sign-extended (shift pair) for the next pixel.
						// NOTE(review): relies on arithmetic right shift of
						// a signed int - implementation-defined but true on
						// all supported compilers
						//pixels = _bswap(*(lpCurrentLong++));
						if(solid_color == -1)
							pixels = SwapInt32BtoN(*(lpCurrentLong++));
						else
							pixels = solid_color;
						pixel_value = (pixels>>16);
						pixels <<= 16;
						pixels >>= 16;
					}

					// Store the pixel in the lowpass band of the wavelet
					pixel_value += channeloffset;
					// pixel_value -= 64;
					// pixel_value += ((rand() & 0x7fff) - 0x4000);
					// if(pixel_value < 0) pixel_value = 0;
					if(pixel_value > 0x7fff) pixel_value = 0x7fff;

					pLowPassRow[column] = pixel_value;

#if (_DEBUG && DUMPLL)
					if(channel==0 && fp)
						fprintf(fp, "%d\n", pixel_value>>7);
#endif
				}
			}

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}

#if (_DEBUG && DUMPLL)
		if(channel == 0 && fp)
			fclose(fp);
#endif

#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
	}
	// Fast path: 8-bit samples read directly from the byte-aligned buffer
	else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
	{
		uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
		//int signval = 0;

		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			// Start at the first column
			column = 0;

			// Process the rest of the row
			for (; column < lowpass_width; column++)
			{
				int pixel_value = *(lpCurrentByte++);

				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
				// Apply the gain about the mid-level (gain is 128, so the
				// >>7 makes this a scale by gain/128)
				pixel_value -= 128 * quantization;
				pixel_value *= gain;
				pixel_value >>= 7;
				pixel_value += 128 * quantization;
				pixel_value += colorshift;

				// Store the pixel in the lowpass band of the wavelet
				// Multiply by 16 to turn 8-bit into the new 16-bit format
				pLowPassRow[column] = pixel_value * 16;
			}

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}

#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
	}
	// Generic path: read each coefficient with GetBits()
	else
	{
		int channeloffset = 0;

		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			channeloffset = (codec->num_frames==2 ? 10 : 5);
		}
		else if(decoder->codec.precision == 12)
		{
			//	channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
		}

		//DAN20050923 no longer trying to compensate for YUV to RGB issues.
		if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
		{
			if(decoder->codec.precision == 8)
			{
				switch(channel)
				{
				case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 16; break;
				case 2: channeloffset += 10; break;
				}
			}
			else if(decoder->codec.precision == 10)
			{
				switch(channel)
				{
				case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += -4; break;
				case 2: channeloffset += -4; break;
				}
			}
			else if(decoder->codec.precision == 12)
			{
				switch(channel)
				{
				case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 0; break;
				case 2: channeloffset += 0; break;
				}
			}
		}

		// The compensation offset only applies to 16-bit coefficients
		if(bits_per_pixel != 16)
			channeloffset = 0;

		for (row = 0; row < lowpass_height; row++)
		{
			for (column = 0; column < lowpass_width; column++) {
				int pixel_value = GetBits(stream, bits_per_pixel);

				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
				// Store the pixel in the lowpass band of the wavelet
				pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
			}

			stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3);

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}
	}

	// Set the wavelet scale factor
	wavelet->scale[0] = quantization;

	// Align the bitstream to the next tag value pair
	AlignBitsTag(stream);

	// Return indication of lowpass decoding success
	return result;
}
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    bool decoded_ok = true;

    // Band dimensions and quantization as recorded in the band header
    int band_width;
    int band_height;
    int band_quant;

    // The encoder may not have used variable-length coding
    int encoding_method = codec->band.encoding;

    // The band index must lie within the range of subbands in the sample
    assert(0 <= band && band <= codec->max_subband);

    // Encoded coefficients start on a tag boundary
    AlignBitsTag(stream);

    // Copy the scale factor used by the encoder into the wavelet band
    // (zero means that the encoder did not supply this parameter)
    if (codec->band.scale > 0) {
        wavelet->scale[band] = codec->band.scale;
    }

    // Record the quantization factor that was used to encode the band coefficients
    band_quant = codec->band.quantization;
    wavelet->quantization[band] = band_quant;

    // Get the highpass band dimensions
    band_width = codec->band.width;
    band_height = codec->band.height;

    // Dispatch on the encoding method used for this band
    switch (encoding_method)
    {
    case BAND_ENCODING_LOSSLESS:
        // Lossless temporal subband //DAN20060701
        decoded_ok = DecodeBand16sLossless(decoder, stream, wavelet, band, band_width, band_height);
        assert(decoded_ok);
        if (decoded_ok)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band);
        }
        break;

    case BAND_ENCODING_16BIT:
        // Uncompressed 16-bit temporal subband //DAN20060701
        decoded_ok = DecodeBand16s(decoder, stream, wavelet, band, band_width, band_height);
        assert(decoded_ok);
        if (decoded_ok)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band);
        }
        break;

    default:
        // Must use the runlength encoding method
        assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);

        // Decode this subband with the finite state machine run-length decoder
        decoded_ok = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, band_width, band_height, threading);
        break;
    }

    // Return failure if a problem was encountered while reading the band coefficients
    if (!decoded_ok) {
        return decoded_ok;
    }

    // The encoded band coefficients end on a bitstream word boundary
    // to avoid interference with the marker for the coefficient band trailer
    AlignBits(stream);

    // Decode the band trailer and record the outcome in the decoder state
    error = DecodeBandTrailer(stream, NULL);
    decoder->error = error;
    assert(error == CODEC_ERROR_OKAY);
    if (error != CODEC_ERROR_OKAY) {
        return false;
    }

    return decoded_ok;
}
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;

    // The band index must be a valid highpass band
    assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);

    // The highpass bands in this wavelet must hold 16-bit signed pixels
    assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);

    // Encoded coefficients must start on a word boundary
    AlignBits(stream);

    // Copy the scale factor used by the encoder into the wavelet band
    // (zero means that the encoder did not supply the parameter)
    if (codec->band.scale > 0) {
        wavelet->scale[band] = codec->band.scale;
    }

    // Record the quantization used to encode the band coefficients
    wavelet->quantization[band] = codec->band.quantization;

    // Decode the band trailer and record the outcome in the decoder state
    error = DecodeBandTrailer(stream, NULL);
    decoder->error = error;
    assert(error == CODEC_ERROR_OKAY);
    if (error != CODEC_ERROR_OKAY) {
        return false;
    }

    // The encoded band coefficients end on a bitstream word boundary
    // to avoid interference with the marker for the coefficient band trailer
    AlignBits(stream);

    return true;
}
// Decode a highpass band that was stored as uncompressed 16-bit coefficients
// (BAND_ENCODING_16BIT).  Coefficients are read row by row from the bitstream,
// multiplied by the band quantization factor, and written into the wavelet band.
// Returns true (the fast path performs no per-word error checking).
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                   int band_index, int width, int height)
{
    PIXEL *rowptr = wavelet->band[band_index];
    int pitch = wavelet->pitch;
    int row;
    int dequant = wavelet->quantization[band_index];

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL);

    //BAND_ENCODING_16BIT
    if (dequant == 1)
    {
        // No dequantization needed: copy rows directly out of the bitstream,
        // swapping each pair of bytes into native order.
        // Mild speedup (2.5% overall half-res decode improvement) versus
        // calling GetWord16s for every coefficient.
        for (row = 0; row < height; row++)
        {
            int column;
            char *sptr = (char *)stream->lpCurrentWord;
            char *dptr = (char *)rowptr;
            for (column = 0; column < width; column++)
            {
                *(dptr + 1) = *sptr++;
                *dptr = *sptr++;
                dptr += 2;
            }

            // Advance the bitstream past the bytes consumed by this row.
            // BUG FIX: consuming data must decrement nWordsUsed, matching the
            // accounting at every other consumption site in this file
            // (the lowpass readers subtract the consumed byte count);
            // the previous "+=" inflated the remaining count and defeated
            // the error-tolerant overrun checks.
            stream->lpCurrentWord += width * 2;
            stream->nWordsUsed -= width * 2;

            rowptr += pitch;
        }
    }
    else
    {
        // Dequantize each coefficient as it is read from the bitstream
        for (row = 0; row < height; row++)
        {
            int column;
            for (column = 0; column < width; column++)
            {
                int value = GetWord16s(stream);
                rowptr[column] = value * dequant;
            }
            rowptr += pitch;
        }
    }

    return true;
}
// Decode a highpass band that was encoded losslessly (BAND_ENCODING_LOSSLESS)
// using the finite state machine coder, then undo the quantization in place.
// Resets the active codebook and difference-coding state for the next subband.
// Returns true on success; on failure sets decoder->error to
// CODEC_ERROR_RUN_DECODE and returns false.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
int result = true;
// Quantization factor that was applied by the encoder (undone below)
int quant = wavelet->quantization[band_index];
// Get the pointer to the finite state machine selected by the current codebook
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
int size;
PIXEL *rowptr;
//int row = 0;
int pitch;
//CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int num_subbands = codec->num_subbands;
//int pixel_type = wavelet->pixel_type[band_index];
//int difference_coding = decoder->codec.difference_coding;
//int localquant = 1;
//int threading = 0;
decoder->codec.active_codebook = 0; // reset CODEC state
decoder->codec.difference_coding = 0; //reset state for next subband
// Must have a valid wavelet
assert(wavelet != NULL);
if (! (wavelet != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
//Must have a valid FSM
assert(fsm != NULL);
if (! (fsm != NULL)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// The FSM table must have been initialized (all rows are treated as one
// int32_t row that covers the entire band)
size = fsm->table.num_states;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check that the band holds 16-bit signed pixels
assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch;
assert(rowptr != NULL && pitch != 0);
if (! (rowptr != NULL && pitch != 0)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
DeQuantFSM(fsm, 1); // can't use the FSM to dequantize because the coefficients are split into high and low bytes
if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Undo the quantization in place (the FSM decoder above did not dequantize)
if(quant)
{
int x,y;
PIXEL *line = rowptr;
if(quant == 32)
{
// Fast path: multiplying by 32 is a shift by 5
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] <<= 5;
}
// Pitch is in bytes; divide by 2 to step in 16-bit pixels
line += pitch/2;
}
}
else
{
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
line[x] *= quant;
}
// Pitch is in bytes; divide by 2 to step in 16-bit pixels
line += pitch/2;
}
}
}
/* if(once <= 60)
{
char name[200];
FILE *fp;
sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once);
fp = fopen(name,"wb");
fwrite(rowptr,width*height,1,fp);
fclose(fp);
once++;
}*/
assert(result == true);
if (! (result == true)) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
return true;
}
// Invert the wavelet to reconstruct the lower wavelet in the transform
// Invert the wavelet at position 'index' in the channel transform to
// reconstruct bands in the wavelet one level below it.  Four cases are
// handled, selected by the transform type and wavelet index:
//   - spatial transform, index > 0: inverse spatial transform fills the
//     lowpass band of wavelet (index - 1);
//   - index > 3 (spatial wavelet above the temporal lowpass): inverse
//     spatial transform fills the lowpass band of the lower wavelet;
//   - index == 3 (spatial wavelet above the temporal highpass): inverse
//     spatial transform fills the highpass band of the temporal wavelet;
//   - index == 2 (the temporal wavelet): inverse temporal transform fills
//     the lowpass bands of the two frame wavelets.
// When 'allocations_only' is nonzero only the wavelet (re)allocation is
// performed; no inverse transform runs.
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel,
IMAGE *wavelet, int index, int precision,
const SCRATCH *scratch, int allocations_only)
{
int transform_type = transform->type;
int width = wavelet->width;
int height = wavelet->height;
int level = wavelet->level;
// Scratch memory used by the inverse transforms that still take a raw buffer
PIXEL *buffer = (PIXEL *)scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Is the current wavelet a spatial wavelet?
if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
{
// Reconstruct the lowpass band in the lower wavelet
int lowpass_index = index - 1;
IMAGE *lowpass = transform->wavelet[lowpass_index];
// The lower wavelet is twice the size of the current wavelet
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
//const int prescale = 1;
// Prescaling is only undone for 10-bit (or deeper) precision
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
// Check that the lowpass band has not already been reconstructed
//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
if(!allocations_only)
{
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Has this wavelet already been reconstructed?
if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
{
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
}
// Is the current wavelet a spatial wavelet above the temporal lowpass band?
else if (index > 3)
{
// Reconstruct the lowpass band in the lower wavelet
const int temporal_wavelet_index = 2;
// Wavelet 4 sits directly above the temporal wavelet (index 2);
// higher wavelets reconstruct into the next lower spatial wavelet
int lowpass_index = (index > 4) ? index - 1 : index - 2;
IMAGE *lowpass = transform->wavelet[lowpass_index];
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = ((lowpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
//const int prescale = 2;
// Prescaling is only undone for 10-bit (or deeper) precision
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
if(!allocations_only)
{
// Check that the lowpass band has not already been reconstructed
assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the spatial wavelet above the temporal highpass band?
else if (index == 3)
{
// Reconstruct the highpass band in the temporal wavelet
const int temporal_wavelet_index = 2;
int highpass_index = index - 1;
IMAGE *highpass = transform->wavelet[highpass_index];
int highpass_width = 2 * width;
int highpass_height = 2 * height;
int highpass_level = level - 1;
int highpass_type = ((highpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
// NOTE(review): unlike the other spatial cases, the prescale itself is
// forced to zero here for precision below 10 bits — confirm intentional
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = inverse_prescale ? transform->prescale[index] : 0;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
highpass_width, highpass_height,
highpass_level, highpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
transform->wavelet[highpass_index] = highpass;
#endif
if(!allocations_only)
{
// Check that the highpass band has not already been reconstructed
assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, highpass, 1);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the temporal wavelet?
else if (index == 2)
{
// Get the temporal wavelet
IMAGE *temporal = wavelet;
// Set the frame wavelet parameters
int frame_level = 1;
int frame_type = WAVELET_TYPE_FRAME;
// Get the two frame wavelets
IMAGE *frame[2];
frame[0] = transform->wavelet[0];
frame[1] = transform->wavelet[1];
// Check that the temporal wavelet is valid
assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
#if _THREADED_DECODER
// Allocate (or reallocate) the frame wavelets with thread safety
frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
transform->wavelet[0] = frame[0];
transform->wavelet[1] = frame[1];
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Before inverse temporal transform");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
}
#endif
if(!allocations_only)
{
// Check that the lowpass bands have not already been reconstructed
assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(temporal));
// Invert the temporal transform between the frame wavelets
STOP(tk_decoding);
START(tk_inverse);
TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
STOP(tk_inverse);
START(tk_decoding);
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = quad[0];
fprintf(logfile, "After inverse temporal transform\n");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, frame[0], 0);
UpdateWaveletBandValidFlags(decoder, frame[1], 0);
#if TIMING
// Increment the number of temporal transforms performed outside of decoding
temporal_decoding_count++;
#endif
}
}
}
// Compute the dimensions of the output buffer
void ComputeOutputDimensions(DECODER *decoder, int frame,
                             int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    FRAME_INFO *info = &decoder->frame;
    //int progressive = codec->progressive;
    TRANSFORM **transform_array = decoder->transform;
    IMAGE *wavelet = NULL;
    int resolution = info->resolution;
    //int chroma_offset = decoder->codec.chroma_offset;
    int decoded_scale = 0;
    int decoded_width;
    int decoded_height;

    // Nothing to do unless the caller provided both output locations
    if (decoded_width_out == NULL || decoded_height_out == NULL) {
        return;
    }

    // Clear the return values in case this routine terminates early
    *decoded_width_out = 0;
    *decoded_height_out = 0;

    // Select the wavelet and scale factor that correspond to the decoded resolution
    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL:
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_HALF:
#if DEBUG
        assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_QUARTER:
        decoded_scale = 1;
        if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
        {
#if DEBUG
            assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
            wavelet = transform_array[0]->wavelet[0];
        }
        else
        {
            wavelet = transform_array[0]->wavelet[3];
        }
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[5];
        if (wavelet == NULL) {
            // The sample was intra frame compressed
            wavelet = transform_array[0]->wavelet[2];
        }
        break;

    default:
        assert(0);
        break;
    }

    // Derive the decoded frame dimensions from the selected wavelet
    assert(wavelet != NULL);
    decoded_width = (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
        ? wavelet->width
        : decoded_scale * wavelet->width;
    decoded_height = decoded_scale * wavelet->height;

    // Return the decoded width and height
    *decoded_width_out = decoded_width;
    *decoded_height_out = decoded_height;
}
#define DEBUG_ROW16U 0
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
FRAME_INFO local_info;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &local_info;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
IMAGE *wavelet;
int wavelet_width;
int wavelet_height;
int decoded_width;
int decoded_height;
int resolution = decoder->frame.resolution;
int chroma_offset = decoder->codec.chroma_offset;
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
//TODO: Change this routine to return the codec error code
CODEC_ERROR error = CODEC_ERROR_OKAY;
//if(decoder->cfhddata.calibration)
// LoadTweak();
//TODO: Change this routine to return an error code
if (decoder == NULL) {
return;
}
decoder->gop_frame_num = frame;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
//return;
// copy frame info in a changable local structure
memcpy(info, &decoder->frame, sizeof(FRAME_INFO));
// Use the old code for reconstructing the frame
#if (0 && DEBUG)
// Force quarter resolution decoding for debugging that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame);
}
#endif
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) return;
#if (1 && DEBUG_ROW16U)
// Force decoding to 16-bit pixels for debugging
info->format = DECODED_FORMAT_YR16;
#endif
#if 0
if (info->format == DECODED_FORMAT_YR16)
{
// Force interlaced or progressive decoding for debugging
//progressive = false;
progressive = true;
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
}
#endif
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder discarding frame: %d\n", frame);
}
#endif
return;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution)) {
decoder->error = CODEC_ERROR_RESOLUTION;
return;
}
#if (0 && TIMING) //(0 && DEBUG)
// Override progressive flag read from the bitstream for debugging
//progressive = 0; // Use the inverse frame transform
progressive = 1; // Use the inverse spatial transform
#endif
// Build the 3D LUTs if needed
ComputeCube(decoder);
//HACK DAN20110131 -- some formats will not directly decode so need to use the AM route
{
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
resolution == DECODED_RESOLUTION_HALF)
{
if( decoder->frame.format == COLOR_FORMAT_R408 ||
decoder->frame.format == COLOR_FORMAT_V408)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
if( decoder->frame.format == COLOR_FORMAT_NV12)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true; // TODO, make it work with this.
}
if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
// Get the decoding scale
if(!uncompressed)
{
switch(resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = 2 * wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_HALF:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_QUARTER:
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
}
else
{
wavelet = transform_array[0]->wavelet[3];
}
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
wavelet = transform_array[0]->wavelet[5];
if(wavelet == NULL) // there Intra Frame compressed
wavelet = transform_array[0]->wavelet[2];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
default:
assert(0);
break;
}
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
decoded_width = info->width/2;
decoded_height = info->height/2;
}
else
{
decoded_width = info->width;
decoded_height = info->height;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
if(resolution == DECODED_RESOLUTION_FULL)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
if(decoded_width*2 == info->width)
{
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
}
else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER;
}
}
else
{
if(resolution == DECODED_RESOLUTION_HALF)
{
if(decoded_width*2 == info->width)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL;
}
}
else if(resolution == DECODED_RESOLUTION_QUARTER)
{
if(uncompressed)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED;
}
else
{
if(decoded_width == info->width)
{
info->resolution = resolution = DECODED_RESOLUTION_HALF;
}
}
}
}
}
if(uncompressed)
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210)
error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0)
error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
else
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4
case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4
error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
//error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2
// Add new code here for the final steps in decoding the original YUV 4:2:2 format
error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch);
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
// Was the newer code able to successfully reconstruct the frame?
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT)
{
// Save the codec error code in the decoder state and return
decoder->error = error;
return;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
decoded_width, decoded_height, info->width, info->height, pitch);
}
#endif
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile) {
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
assert((info->height+7)/8 >= (decoded_height+7)/8);
if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) {
decoder->error = CODEC_ERROR_FRAMESIZE;
return;
}
#if (0 && DEBUG)
if (logfile) {
//SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16);
SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16);
// Adjust the subimage to be at the middle of the right border
//subimage.row += wavelet_height/2 - 8;
DumpBand("SIF Image", wavelet, 0, &subimage, logfile);
}
#endif
START(tk_inverse);
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
}
else
// Was the first transform a frame transform (used for interlaced frames)?
if (!progressive)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY))
{
// Apply the inverse frame transform and pack the results into the output buffer
int precision = codec->precision;
#if (0 && DEBUG)
DumpWaveletBandsPGM(wavelet, frame, num_channels);
#endif
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
int precision = codec->precision;
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int scale = 13;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // NULL wavelet[5] means the sample was intra-frame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
int precision = codec->precision;
TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
}
}
}
else // The first transform was a spatial transform (used for progressive frames)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
uint8_t *pixoutput = output;
if(decoder->use_active_metadata_decoder) //WIP
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToBayerYUV);
}
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
}
#else
//TODO : Accelerated BAYER for single thread decoding.
assert(0);
// Transform the wavelets for each channel to the output image (not threaded)
//TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info,
// &decoder->scratch, chroma_offset, precision);
#endif
}
else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER &&
(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
{
uint8_t *pixoutput = output;
if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
pixoutput += (info->height-1)*pitch;
pitch = -pitch;
}
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
int precision = codec->precision;
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // NULL wavelet[5] means the sample was intra-frame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
char *format = decoded_format_string[info->format];
sprintf(label, "Output, channel: %d, format: %s", channel, format);
DumpImageStatistics(label, lowpass_images[channel], logfile);
}
#endif
}
STOP(tk_inverse);
#if 1 //|| BAYER_SUPPORT
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[4096*3],*sptr;
//unsigned short scanline2[4096*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
IMAGE *gd_image = lowpass_images[3];
uint8_t *outyuv,*line = output;
PIXEL *bayer_line, *bayerptr;
PIXEL *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
int format = info->format;
bool inverted = false;
int maxbound = 4095; //10-bit source
int midpoint = 32768>>3;
int shift = 4;
if(precision == 12)
{
maxbound = 16383;
midpoint = 32768>>1;
shift = 2;
}
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32)
{
inverted = true;
line += (info->height-1)*pitch;
pitch = -pitch;
}
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
for(y=0; y<info->height; y++)
{
uint8_t *newline = line;
PIXEL *newG=G,*newRG=RG,*newBG=BG;
PIXEL *gptr,*rgptr,*bgptr,*gdptr;
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
newline += pitch*y;
newG += y * (g_image->pitch / sizeof(PIXEL));
newRG += y * (rg_image->pitch / sizeof(PIXEL));
newBG += y * (bg_image->pitch / sizeof(PIXEL));
gptr = newG;
rgptr = newRG;
bgptr = newBG;
sptr = scanline;
for(x=0; x<info->width; x++)
{
g = (*gptr++);
if(g > maxbound) g = maxbound;
rg = (*rgptr++);
bg = (*bgptr++);
r = (rg<<1) - midpoint + g;
b = (bg<<1) - midpoint + g;
if(r > maxbound) r = maxbound;
if(b > maxbound) b = maxbound;
if(r < 0) r = 0;
if(g < 0) g = 0;
if(b < 0) b = 0;
*sptr++ = r<<shift;
*sptr++ = g<<shift;
*sptr++ = b<<shift;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr,
newline, y, pitch,
info->format, whitebitdepth, flags);
}
}
#endif
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
uint8_t *line = output;
unsigned char *rgb8;
PIXEL *G,*RG,*BG;
int x,y;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
if(info->format == DECODED_FORMAT_RGB32)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
*rgb8++ = 255;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
}
else
#endif
{
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
START(tk_inverse);
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int width = info->width;
int height = info->height;
sprintf(label, "Output");
DumpBufferStatistics(label, output, width, height, pitch, logfile);
}
#endif
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
// Handle inversion of the output image in this routine
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER)
height *= 2;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
//#if BUILD_PROSPECT
// Output the frame in V210 format?
if( (format == DECODED_FORMAT_V210 ||
format == DECODED_FORMAT_YU64) &&
decoder->codec.encoded_format != ENCODED_FORMAT_BAYER )
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// The output buffer is an array of 10-bit pixels packed into double words
#if 0
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2,
buffer, buffer_size, chroma_offset, decoder->codec.precision);
#else
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
else
//#endif
// Decoding a full resolution progressive frame to a Bayer output format?
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL);
}
//TODO: Replace this memory allocation with a scratch buffer allocation
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*3*sizeof(PIXEL);
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16);
#endif
decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL);
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
if(decoder->RawBayer16)
{
uint8_t *line;
PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16;
PIXEL16U *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
//float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
#if 0
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5f}
};
float mtrx[3][4] =
{
{1.0f, 0, 0, 0},
{0, 1.0f, 0, 0},
{0, 0, 1.0f, 0}
};
float whitebalance[3] = { 1.0f, 1.0f, 1.0f };
#endif
#if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions
/* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2)
{
float fval = 0.0;
int i;
for(i=0; i<12; i++)
{
mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3];
if((i>>2) == (i&3))
{
if(fval != 1.0)
{
matrix_non_unity = 1;
}
}
else
{
if(fval != 0.0)
{
matrix_non_unity = 1;
}
}
}
// not active as VFW isn't yet support the 3D LUTs
if(decoder->cfhddata.version >= 5)
{
int j;
float encode_curvebase = 90.0;
float decode_curvebase = 90.0;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
int decode_curve_type = decoder->cfhddata.decode_curve >> 16;
if(decoder->cfhddata.user_white_balance[0] > 0.0)
{
wb_non_unity = 1;
whitebalance[0] = decoder->cfhddata.user_white_balance[0];
whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0;
whitebalance[2] = decoder->cfhddata.user_white_balance[3];
}
if(encode_curve_type) //1 or 2
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
else
{
encode_curve_type = 1;
encode_curvebase = 90.0;
}
if(decode_curve_type) //1 or 2
decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.decode_curve & 0xff);
else
{
decode_curve_type = 1;
decode_curvebase = 90.0;
}
for(j=0; j<2048; j++)
{
if(encode_curve_type == 1)
curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase);
else
curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase);
}
for(j=-512; j<=2048; j++) // -1 to +4
{
if(encode_curve_type == CURVE_TYPE_LOG)
lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase);
else
lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase);
}
}
}*/
#endif
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
&decoder->scratch, chroma_offset, precision);
#endif
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER &&
(info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4))
{
#if _THREADED //DemosaicRAW
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* int bayer_format = decoder->cfhddata.bayer_format;
unsigned char *outA8, *outB8;
unsigned short *lineStartA16, *lineStartB16;
unsigned short *lineA16, *lineB16;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++)
{
bayer_line = decoder->RawBayer16;
bayer_line += bayer_pitch * y;
if(y<info->height)
{
ColorDifference2Bayer(info->width,
bayer_line, bayer_pitch, bayer_format);
}
if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline
{
unsigned short *delayptr = decoder->RawBayer16;
delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES);
BayerRippleFilter(info->width,
delayptr, bayer_pitch, bayer_format, decoder->RawBayer16);
}
if(y>=DEMOSAIC_DELAYLINES)
{
int delay_y = y - DEMOSAIC_DELAYLINES;
unsigned short *sptr, scanline[8192*3];
outA8 = line;
line += pitch;
outB8 = line;
line += pitch;
sptr = scanline;
DebayerLine(info->width*2, info->height*2, delay_y*2,
decoder->RawBayer16, bayer_format, sptr, sharpening);
for(x=0; x<info->width*2; x++)
{
outA8[2] = *sptr++>>8;
outA8[1] = *sptr++>>8;
outA8[0] = *sptr++>>8;
outA8+=3;
}
for(x=0; x<info->width*2; x++)
{
outB8[2] = *sptr++>>8;
outB8[1] = *sptr++>>8;
outB8[0] = *sptr++>>8;
outB8+=3;
}
}
}*/
#endif // _THREADED
}
else
if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* {
int bayer_format = decoder->cfhddata.bayer_format;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
int bayer_format = decoder->cfhddata.bayer_format;
for(y=2; y<info->height-3; y++)
{
int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
}*/
#endif
}
// Pack the rows of Bayer data (full resolution progressive) into BYR3 format?
else if (format == DECODED_FORMAT_BYR3)
{
PIXEL16U *outR, *outG1, *outG2, *outB;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
// #pragma omp parallel for
for(y=0; y<info->height; y++)
{
uint8_t *line = output;
PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16;
line += pitch*2*y;
bayerptr += bayer_pitch * y;
outR = (PIXEL16U *)line;
outG1 = outR + (pitch/4);
outG2 = outR + (pitch/4)*2;
outB = outR + (pitch/4)*3;
G = (PIXEL16U *)bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
// Pack the rows of Bayer components into the BYR3 pattern
#if (1 && XMMOPT)
{
__m128i *G_128 = (__m128i *)G;
__m128i *RG_128 = (__m128i *)RG;
__m128i *BG_128 = (__m128i *)BG;
__m128i *GD_128 = (__m128i *)GD;
__m128i *outR_128 = (__m128i *)outR;
__m128i *outG1_128 = (__m128i *)outG1;
__m128i *outG2_128 = (__m128i *)outG2;
__m128i *outB_128 = (__m128i *)outB;
__m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff);
__m128i midpoint1 = _mm_set1_epi16(32768>>6);
__m128i midpoint2 = _mm_set1_epi16(32768>>5);
int column_step = 8;
int post_column = (info->width) - ((info->width) % column_step);
for (x=0; x < post_column; x += column_step)
{
__m128i r_128;
__m128i g1_128;
__m128i g2_128;
__m128i b_128;
__m128i g_128;
__m128i rg_128;
__m128i bg_128;
__m128i gd_128;
g_128 = _mm_load_si128(G_128++);
rg_128 = _mm_load_si128(RG_128++);
bg_128 = _mm_load_si128(BG_128++);
gd_128 = _mm_load_si128(GD_128++);
g_128 = _mm_srli_epi16(g_128, 6);
rg_128 = _mm_srli_epi16(rg_128, 5);
bg_128 = _mm_srli_epi16(bg_128, 5);
gd_128 = _mm_srli_epi16(gd_128, 6);
gd_128 = _mm_subs_epi16(gd_128, midpoint1);
rg_128 = _mm_subs_epi16(rg_128, midpoint2);
bg_128 = _mm_subs_epi16(bg_128, midpoint2);
r_128 = _mm_adds_epi16(rg_128, g_128);
b_128 = _mm_adds_epi16(bg_128, g_128);
g1_128 = _mm_adds_epi16(g_128, gd_128);
g2_128 = _mm_subs_epi16(g_128, gd_128);
r_128 = _mm_adds_epi16(r_128, limiter);
r_128 = _mm_subs_epu16(r_128, limiter);
g1_128 = _mm_adds_epi16(g1_128, limiter);
g1_128 = _mm_subs_epu16(g1_128, limiter);
g2_128 = _mm_adds_epi16(g2_128, limiter);
g2_128 = _mm_subs_epu16(g2_128, limiter);
b_128 = _mm_adds_epi16(b_128, limiter);
b_128 = _mm_subs_epu16(b_128, limiter);
_mm_store_si128(outR_128++, r_128);
_mm_store_si128(outG1_128++, g1_128);
_mm_store_si128(outG2_128++, g2_128);
_mm_store_si128(outB_128++, b_128);
}
G = (PIXEL16U *)G_128;
RG = (PIXEL16U *)RG_128;
BG = (PIXEL16U *)BG_128;
GD = (PIXEL16U *)GD_128;
outR = (PIXEL16U *)outR_128;
outG1 = (PIXEL16U *)outG1_128;
outG2 = (PIXEL16U *)outG2_128;
outB = (PIXEL16U *)outB_128;
}
#endif
for(; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
//Red-grn phase
*outR++ = r>>6;
*outG1++ = g1>>6;
*outG2++ = g2>>6;
*outB++ = b>>6;
}
}
}
// Pack the rows of Bayer data (full resolution progressive) into BYR4 format?
else if (format == DECODED_FORMAT_BYR4)
{
int bayer_format = decoder->cfhddata.bayer_format;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
//int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
int32_t r, g, b, rg, bg, gd, g1, g2;
// The output of the inverse transform is unsigned 16-bit integers
const int midpoint = 32768;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - midpoint;
r = ((rg - midpoint)<<1) + g;
b = ((bg - midpoint)<<1) + g;
g1 = g + gd;
g2 = g - gd;
r = SATURATE_16U(r);
g1 = SATURATE_16U(g1);
g2 = SATURATE_16U(g2);
b = SATURATE_16U(b);
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
default:
// Unsupported Bayer format
assert(0);
*outA16++ = 0;
*outA16++ = 0;
*outB16++ = 0;
*outB16++ = 0;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
for(y=2; y<info->height-3; y++)
{
//int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
// Linear restore
{
unsigned short *buff = (unsigned short *)output;
//static int pos = 0;
for(y=0; y<info->height*2; y++)
{
for(x=0; x<info->width*2; x++)
{
float val = (float)buff[y*info->width*2 + x]/65535.0f;
float encode_curvebase = 90.0;
int encode_curve_type = CURVE_TYPE_LOG;
int encode_curve_neg;
if((decoder->cfhddata.encode_curve)>>16) //1 or 2
{
encode_curve_type = (decoder->cfhddata.encode_curve)>>16;
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR)
encode_curve_type = CURVE_TYPE_LINEAR;
encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = CURVE_LOG2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_GAMMA:
val = CURVE_GAM2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINEON:
val = CURVE_CINEON2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINE985:
val = CURVE_CINE9852LIN(val,encode_curvebase);
break;
case CURVE_TYPE_PARA:
val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff));
break;
case CURVE_TYPE_CSTYLE:
val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff));
break;
case CURVE_TYPE_SLOG:
val = CURVE_SLOG2LIN((float)val);
break;
case CURVE_TYPE_LOGC:
val = CURVE_LOGC2LIN((float)val);
break;
case CURVE_TYPE_LINEAR:
default:
break;
}
buff[y*info->width*2 + x] = (int)(val*4095.0);
}
}
}
}
else
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[8192*3],*sptr;
//unsigned short scanline2[8192*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
uint8_t *outyuv,*line = output;
PIXEL *bayerptr;
int x,y;
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
__m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg;
__m128i rrrrrrrr,bbbbbbbb;
__m128i mid8192 = _mm_set1_epi16(8192);
__m128i mid16384 = _mm_set1_epi16(16384);
__m128i mid32768 = _mm_set1_epi16(32768);
__m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff);
int sse2width = info->width & 0xfff8;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
sptr = scanline;
x = 0;
for(; x<sse2width; x+=8)
{
gggggggg = _mm_loadu_si128((__m128i *)G); G+=8;
rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8;
bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8;
ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767
bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767
//limit to 0 to 16383
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
//limit to 0 to 16383
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
*sptr++ = _mm_extract_epi16(rrrrrrrr, 0);
*sptr++ = _mm_extract_epi16(gggggggg, 0);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 0);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 1);
*sptr++ = _mm_extract_epi16(gggggggg, 1);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 1);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 2);
*sptr++ = _mm_extract_epi16(gggggggg, 2);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 2);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 3);
*sptr++ = _mm_extract_epi16(gggggggg, 3);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 3);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 4);
*sptr++ = _mm_extract_epi16(gggggggg, 4);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 4);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 5);
*sptr++ = _mm_extract_epi16(gggggggg, 5);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 5);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 6);
*sptr++ = _mm_extract_epi16(gggggggg, 6);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 6);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 7);
*sptr++ = _mm_extract_epi16(gggggggg, 7);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 7);
}
for(; x<info->width; x++)
{
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
if(r < 0) r = 0; if(r > 0xffff) r = 0xffff;
if(g < 0) g = 0; if(g > 0xffff) g = 0xffff;
if(b < 0) b = 0; if(b > 0xffff) b = 0xffff;
*sptr++ = r;
*sptr++ = g;
*sptr++ = b;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch,
info->format, whitebitdepth, flags);
}
line += pitch;
bayer_line += bayer_pitch;
}
#endif
}
/* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201
// Pack the rows of Bayer data (full resolution progressive) into BYR2 format?
else if (format == DECODED_FORMAT_YUYV)
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale);
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO: Need to convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO: Need to convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
bayer_line = decoder->RawBayer16;
scale = 16384.0;
//_mm_empty(); // Clear the mmx register state
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale * 4.0);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale * 4.0);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale * 4.0);
scale = 4096.0;
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO: Need to convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO: Need to convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else //RGBs
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
r_rmult = (mtrx[0][0]) * scale * whitebalance[0];
r_gmult = (mtrx[0][1]) * scale * whitebalance[1];
r_bmult = (mtrx[0][2]) * scale * whitebalance[2];
r_offset= (mtrx[0][3]) * scale;
g_rmult = (mtrx[1][0]) * scale * whitebalance[0];
g_gmult = (mtrx[1][1]) * scale * whitebalance[1];
g_bmult = (mtrx[1][2]) * scale * whitebalance[2];
g_offset= (mtrx[1][3]) * scale;
b_rmult = (mtrx[2][0]) * scale * whitebalance[0];
b_gmult = (mtrx[2][1]) * scale * whitebalance[1];
b_bmult = (mtrx[2][2]) * scale * whitebalance[2];
b_offset= (mtrx[2][3]) * scale;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = RG + bayer_pitch/4;
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 127);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
// g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
//g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
line += pitch;
bayer_line += bayer_pitch;
}
}
*/
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
int precision = codec->precision;
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*info->height*num_channels*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL);
}
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL);
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
frame_size = info->width*decoded_height*4*4*sizeof(PIXEL);
#if _ALLOCATOR
{
ALLOCATOR *allocator = decoder->allocator;
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, frame_size, 16);
}
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
decoder->RGBFilterBufferSize = frame_size;
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
//TODO: Replace this memory allocation with a scratch buffer allocation
if(decoder->RawBayer16)
{
uint8_t *outyuv,*line, *source_line;
PIXEL16U *bayerptr;
PIXEL16U *G,*RG,*BG;
int x,y;
int src_pitch = info->width*num_channels*sizeof(PIXEL);
int y_rmult,y_gmult,y_bmult,y_offset;//shift=8;
int u_rmult,u_gmult,u_bmult,u_offset;
int v_rmult,v_gmult,v_bmult,v_offset;
float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5}
};
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, src_pitch,
info, chroma_offset, precision);
#else
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, src_pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
if (format == DECODED_FORMAT_YUYV)
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 256.0;
y_rmult = (int)((rgb2yuv[0][0]));
y_gmult = (int)((rgb2yuv[0][1]));
y_bmult = (int)((rgb2yuv[0][2]));
y_offset= (int)((rgb2yuv[0][3]));
u_rmult = (int)((rgb2yuv[1][0]));
u_gmult = (int)((rgb2yuv[1][1]));
u_bmult = (int)((rgb2yuv[1][2]));
u_offset= (int)((rgb2yuv[1][3]));
v_rmult = (int)((rgb2yuv[2][0]));
v_gmult = (int)((rgb2yuv[2][1]));
v_bmult = (int)((rgb2yuv[2][2]));
v_offset= (int)((rgb2yuv[2][3]));
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
source_line += src_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 16384.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f);
scale = 4096.0;
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
source_line += src_pitch;
}
}
else //RGBs
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
unsigned short *rgb16 = (unsigned short *)line;
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 255);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
else if(info->format == DECODED_FORMAT_RG48)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
G1 = (*G++);
R1 = (*RG++);
B1 = (*BG++);
*rgb16++ = R1;
*rgb16++ = G1;
*rgb16++ = B1;
}
}
line += pitch;
source_line += src_pitch;
}
}
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else // Output the frame in one of the RGB 8-bit formats
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
// Invert the bottom wavelet and convert the output to the requested color format
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
#else
TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
}
}
#if TIMING
// Count the number of progressive frames that were decoded
progressive_decode_count++;
#endif
}
STOP(tk_inverse);
#ifdef ADOBE_MEMORY_FUNCTIONS
if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) ||
(decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2))
{
#if _ALLOCATOR
if(decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#else
if(decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#endif
}
#endif
#if (0 && DEBUG)
if (logfile) {
//uint8_t *subimage = output;
uint8_t *subimage = output + (2 * info->width) - 16;
DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Exit ReconstructFrameToBuffer\n");
}
#endif
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
}
#if 0
// Reconstruct the frame to quarter resolution at full frame rate.
//
// NOTE(review): this entire definition sits inside the enclosing `#if 0`
// and is never compiled; the active replacement follows in the `#else`
// branch. As written it would not compile anyway: `output` and
// `channel_row_ptr` are used but never declared, and the inner
// `char *bufptr` shadows the outer `PIXEL *bufptr`. Kept as historical
// reference only — do not re-enable without fixing those defects.
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
                             uint8_t *frame1, uint8_t *frame2, int output_pitch,
                             FRAME_INFO *info, char *buffer, size_t buffer_size)
{
    TRANSFORM **transform_array = decoder->transform;
    int output_width = info->width;
    int output_height = info->height;
    // Per-channel row cursors into the lowpass bands of the two wavelets
    PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
    PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
    // NOTE(review): out1/out2 row pointer arrays are declared but never used
    PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS];
    PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS];
    // Carve per-channel scratch rows out of the caller-supplied buffer
    PIXEL *bufptr = (PIXEL *)buffer;
    // BUG: `output` is not declared anywhere — the parameters are frame1/frame2
    uint8_t *output_row_ptr = output;
    int low_pitch[CODEC_MAX_CHANNELS];
    int high_pitch[CODEC_MAX_CHANNELS];
    int channel;
    int row;
    // Check that there is enough space for the intermediate results from each channel
    assert(output_width * sizeof(PIXEL) < buffer_size);
    // Get pointers into the wavelets for each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
        IMAGE *low_wavelet = transform_array[channel]->wavelet[3];
        IMAGE *high_wavelet = transform_array[channel]->wavelet[2];
        // Get the pointers to the first row in each lowpass band
        low_row_ptr[channel] = low_wavelet->band[0];
        high_row_ptr[channel] = high_wavelet->band[0];
        // Convert byte pitches to pitches in units of PIXEL
        low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
        high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
        // Allocate space for one row of results for this channel
        // BUG: `channel_row_ptr` is never declared in this function
        channel_row_ptr[channel] = bufptr;
        bufptr += low_wavelet->width;
    }
    for (row = 0; row < output_height; row++)
    {
        // NOTE(review): shadows the outer PIXEL *bufptr and is never read
        char *bufptr = buffer;
        for (channel = 0; channel < num_channels; channel++)
        {
            // Invert the temporal transform at quarter resolution
            InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]);
            // Advance to the next row in each band for the temporal transform
            low_row_ptr[channel] += low_pitch[channel];
            high_row_ptr[channel] += high_pitch[channel];
        }
        // Pack the intermediate results into the output row
        ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width);
        // Advance the output row pointer
        output_row_ptr += output_pitch;
    }
}
#else
// Reconstruct the frame to quarter resolution at full frame rate
// Reconstruct one frame of a GOP to quarter resolution at full frame rate.
//
// decoder       - decoder state (transforms, scratch, codec state)
// num_channels  - number of color channels to reconstruct
// frame_index   - 0 selects the even temporal row, 1 the odd temporal row
// output        - destination buffer for the packed output frame
// output_pitch  - byte pitch of the output buffer (negated for inverted RGB)
// info          - output dimensions, color format, and color space
// scratch       - scratch allocator used for one row of per-channel results
// precision     - encoded precision in bits (12 is remapped to 8 below)
//
// Each output row is produced by inverting the temporal transform on one row
// of the lowpass/highpass wavelet bands, then converting the per-channel
// intermediate rows to the requested output format.
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// The pixels are descaled in the inverse temporal transform
//const int descale = 0;
// Shift the intermediate results to 16-bit pixels
const int shift_yu64 = 8;
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Initialize a pointer for allocating space in the buffer
PIXEL *bufptr = (PIXEL *)buffer;
// Array of pointers to the start of each channel in the intermediate results
PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];
// Check that there is enough space for the intermediate results from each channel
#if DEBUG
assert(output_width * sizeof(PIXEL) < buffer_size);
#endif
ComputeCube(decoder);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
IMAGE *high_wavelet = transform_array[channel]->wavelet[3];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Force the row of intermediate results to be properly aligned
bufptr = (PIXEL *)ALIGN16(bufptr);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
// Check that the row of intermediate results is properly aligned
assert(ISALIGNED16(channel_row_ptr[channel]));
}
// Invert the image if required
// (both cases fall out of the switch after adjusting the row pointer/pitch;
// all other formats are written top-down and need no adjustment)
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
//HACK: Seems to work, I don't know why. //DAN20070304
if (precision == 12) precision = 8;
// Apply the inverse temporal transform to the lowpass and highpass rows
for (row = 0; row < output_height; row++)
{
// Most of the color conversion routines use zero descaling
int descale = 0;
//char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
if (frame_index == 0)
{
// Invert the temporal transform at quarter resolution to get the even row
InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
else
{
assert(frame_index == 1);
// Invert the temporal transform at quarter resolution to get the odd row
InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
if(decoder->use_active_metadata_decoder)
{
// Active metadata path: clamp/scale the row in place with SSE2, then let
// the one-row active-metadata converter produce the output format.
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int i;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
info2.height = 1;
for(i=0;i<num_channels;i++)
{
channeldata[i] = (uint8_t *)channel_row_ptr[i];
channelpitch[i] = 0;
}
#if 1
// NOTE(review): this SIMD block assumes three channels laid out as one
// full-width luma row and two half-width chroma rows (4:2:2) — confirm
// against the encoded formats that reach this path.
{
__m128i *Y = (__m128i *)channeldata[0];
__m128i *U = (__m128i *)channeldata[1];
__m128i *V = (__m128i *)channeldata[2];
__m128i v;
int x;
// Saturating add then subtract clamps each value to [0, 0x0fff]
__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);
for(x=0;x<info->width;x+=8)
{
v = _mm_load_si128(Y);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(Y++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(U);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(U++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(V);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(V++, v);
}
}
#else
//non SSE2
for(x=0;x<info->width*2;x++)
{
int val = *gptr++;
if(val < 0) val = 0;
if(val > 4095) val = 4095;
val <<= 4;
*src++ = val;
}
src = scanline2;
#endif
Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch);
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert the rows of luma and chroma into the output format
switch(format)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
// Pack the intermediate results into the output row
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
assert(0);//need quarter res BAYER To YUV decoder
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, info->colorspace, format);
}
else
{
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format);
}
break;
case COLOR_FORMAT_RGB24:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width,
shift_yu64, precision, format);
break;
case COLOR_FORMAT_B64A:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, COLOR_FORMAT_B64A, color_space);
}
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
}
#endif
assert(0);
break;
}
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
#if 0
// Copy the quarter resolution lowpass channels from the spatial transform
// NOTE(review): this definition is inside an "#if 0" block and is never
// compiled; it was superseded by ConvertQuarterFrameToBuffer below, which
// supports multiple output formats instead of only the packed copy.
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *wavelet = transform_array[channel]->wavelet[1];
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
for (row = 0; row < output_height; row++)
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
// Convert the quarter resolution lowpass channels to the specified output format
// Convert the quarter resolution lowpass channels to the specified output format.
//
// decoder         - decoder state (codec state, worker thread pool, frame info)
// transform_array - per-channel transforms; wavelet[1] holds the quarter-res lowpass
// num_channels    - number of color channels to convert
// output          - destination buffer for the packed output frame
// output_pitch    - byte pitch of the output buffer (negated for inverted RGB)
// info            - output dimensions, color format, and color space
// precision       - encoded precision in bits, forwarded to the row converters
//
// Two paths: the active-metadata path hands whole-frame conversion to the
// worker thread pool; otherwise each wavelet row is converted inline,
// dispatching on the requested color format.
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the wavelets with quarter resolution
const int wavelet_index = 1;
IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];
// The wavelet should have been reconstructed
assert(wavelet != NULL);
// The lowpass band should be valid
assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
// Invert the image if required
// (both cases fall out of the switch after adjusting the row pointer/pitch)
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
ComputeCube(decoder);
//HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
{
if( format == COLOR_FORMAT_YU64 ||
format == COLOR_FORMAT_V210 ||
format == COLOR_FORMAT_R408 ||
format == COLOR_FORMAT_V408)
{
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
}
if(decoder->use_active_metadata_decoder)
{
// NOTE(review): when _THREADED is not defined this branch silently does
// nothing and the frame is not converted — confirm that all builds that
// can set use_active_metadata_decoder are threaded builds.
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_row_ptr;
mailbox->pitch = output_pitch;
mailbox->framenum = 0;
for(channel = 0; channel < num_channels; channel++)
{
mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL);
}
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert each row to the specified output format
for (row = 0; row < output_height; row++)
{
// Right shift for converting lowpass coefficients to pixels
int descale = 4;
// Mask off the sign bit so inverted variants share their base format's case
switch(format & 0x7fffffff)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, info->colorspace, format);
}
else
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width,
precision, format);
}
break;
case COLOR_FORMAT_RGB24:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
case COLOR_FORMAT_RGB32_INVERTED:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0, num_channels);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
//TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
//
}
else
{
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format);
}
break;
case COLOR_FORMAT_B64A:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
assert(0);
break;
}
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
}
// Release all resources allocated by the decoder
// Release all resources allocated by the decoder.
// The transform array arguments are currently unused; all cleanup is
// delegated to ClearDecoder.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
	// Timing report (currently compiled out by the "&& 0" above)
	FILE *logfile = decoder->logfile;
	uint32_t frame_count = decoder->frame_count;
	// Fixed: a stray line-continuation backslash followed the condition here
	if (logfile != NULL && frame_count > 0)
	{
#ifdef _WINDOWS
		PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
		PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
	}
#endif
	// Free the data structures allocated for decoding
	ClearDecoder(decoder);
}
// Force the decoder (and its parallel decoder, when one exists) to reload
// its metadata on the next decode.
void DecodeForceMetadataRefresh(DECODER *decoder)
{
	// Mark the primary decoder for a metadata refresh
	decoder->cfhddata.force_metadata_refresh = true;

	// A parallel decoder, if present, must be refreshed as well
	if (decoder->parallelDecoder != NULL)
	{
		decoder->parallelDecoder->cfhddata.force_metadata_refresh = true;
	}
}
// Store the decoder flag word supplied by the caller.
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// Set the decoder flags
	decoder->flags = flags;
#if (0 && DEBUG)
	if (logfile) {
		// Fixed: flags is a uint32_t, so %p (pointer) was the wrong
		// conversion specifier; use an unsigned hex format instead
		fprintf(logfile, "Decoder flags: 0x%08x\n", decoder->flags);
	}
#endif
}
// Record the output frame dimensions, pixel format, and decoding resolution.
// The white point is 13 bits for the signed 13-bit formats (WP13/W13A) and
// 16 bits for every other format.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
	// Need to modify the codec to use the decoding format
	decoder->frame.width = width;
	decoder->frame.height = height;

	// The decoded and output formats both track the requested format
	decoder->frame.output_format = format;
	decoder->frame.format = format;

	// Select the white point for the requested format
	switch (format)
	{
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_W13A:
		//decoder->frame.signed_pixels = 1;
		decoder->frame.white_point = 13;
		break;
	default:
		//decoder->frame.signed_pixels = 0;
		decoder->frame.white_point = 16;
		break;
	}

	decoder->frame.resolution = resolution;
	decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
// Initialize the decoder capability word: the low bits hold CPU feature
// flags and the high 16 bits hold the processor count (clamped by any
// thread limit or affinity mask set in thread_cntrl).
void SetDecoderCapabilities(DECODER *decoder)
{
int processor_count;
#ifdef _WINDOWS
int limit_cpus = 32;
#else
int limit_cpus = 32; // AJA spins off too many
#endif
// Set the capabilities that are most likely supported by the Intel Mac
decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);
// An explicit thread limit takes precedence over the affinity mask
if (decoder->thread_cntrl.limit)
{
limit_cpus = decoder->thread_cntrl.limit;
}
else if (decoder->thread_cntrl.affinity)
{
// Count the set bits in the affinity mask to get the CPU limit
int i;
const int max_cpu_count = 32;
limit_cpus = 0;
for (i = 0; i < max_cpu_count; i++)
{
if (decoder->thread_cntrl.affinity & (1<<i)) {
limit_cpus++;
}
}
}
// Set the number of processors
processor_count = GetProcessorCount();
if(processor_count > limit_cpus)
processor_count = limit_cpus;
#if (0 && DEBUG)
// Set the number of processors (for debugging)
//processor_count = 8;
processor_count = 1;
fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif
// Pack the processor count into the high 16 bits of the capability word
decoder->thread_cntrl.capabilities |= (processor_count << 16);
}
// Return the capability word computed by SetDecoderCapabilities
// (CPU feature flags in the low bits, processor count in the high 16 bits).
int GetDecoderCapabilities(DECODER *decoder)
{
	const int capabilities = decoder->thread_cntrl.capabilities;
	return capabilities;
}
// Set the decoder color space flags; returns true if the flags were
// accepted, false if they exceed the maximum supported color space.
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
	// Reject flags above the supported range
	// (the lower bound check is intentionally disabled, as before)
	if (color_flags > MAX_DECODED_COLOR_SPACE)
	{
		// The specified color flags were not valid
		return false;
	}

	decoder->frame.colorspace = color_flags;

	// Indicate that the color flags were set as specified
	return true;
}
// Compute the resolution corresponding to the specified combination of input and output dimensions
// Map a (input, output) dimension pair onto a decoding resolution:
// full, half, or quarter when the output exactly matches the input scaled
// by 1, 1/2, or 1/4; otherwise DECODED_RESOLUTION_UNSUPPORTED.
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
	int scaled_width = input_width;
	int scaled_height = input_height;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

	// Exact match means full resolution decoding
	if (output_width == scaled_width && output_height == scaled_height)
	{
		return DECODED_RESOLUTION_FULL;
	}

	// Does the output match the half resolution dimensions?
	scaled_width /= 2;
	scaled_height /= 2;
	if (output_width == scaled_width && output_height == scaled_height)
	{
		return DECODED_RESOLUTION_HALF;
	}

	// Does the output match the quarter resolution dimensions?
	scaled_width /= 2;
	scaled_height /= 2;
	if (output_width == scaled_width && output_height == scaled_height)
	{
		return DECODED_RESOLUTION_QUARTER;
	}

	// No supported scale factor matches the output dimensions
	return DECODED_RESOLUTION_UNSUPPORTED;
}
// Compute the decoded resolution that is closest to the output dimensions
// Choose the decoded resolution (full, half, or quarter) whose frame size is
// the smallest one that is still at least as large as the requested output
// dimensions, halving at most twice.
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
int decoded_width = input_width;
int decoded_height = input_height;
// Resolution selected by the number of halvings applied (0, 1, or 2)
static int decodedResolution[] =
{
DECODED_RESOLUTION_FULL,
DECODED_RESOLUTION_HALF,
DECODED_RESOLUTION_QUARTER
};
int reduction = 0;
int max_reduction = 2;
// Output height can be negative for inverted RGB
output_height = abs(output_height);
#if 1
// Always decode to the next larger size
while (decoded_width > output_width &&
decoded_height > output_height &&
reduction < max_reduction)
{
// Decode to a frame size that is larger than the output image
int reduced_width = decoded_width / 2;
int reduced_height = decoded_height / 2;
// Only accept the halving if the result still covers the output
if (reduced_width >= output_width && reduced_height >= output_height)
{
decoded_width = reduced_width;
decoded_height = reduced_height;
reduction++;
}
else
{
break;
}
}
#else
// NOTE(review): this alternative heuristic (allow up to 25% upscaling)
// is compiled out by the "#if 1" above
while (decoded_width*4 > output_width*5 &&
decoded_height*4 > output_height*5 &&
reduction < max_reduction)
{
#if 0
// Decode to a frame size that is larger than the output image
int reduced_width = decoded_width / 2;
int reduced_height = decoded_height / 2;
if (reduced_width >= output_width && reduced_height >= output_height)
{
decoded_width = reduced_width;
decoded_height = reduced_height;
reduction++;
}
else
{
break;
}
#else
// Better to scale up a smaller image than scale down a larger image
decoded_width /= 2;
decoded_height /= 2;
reduction++;
#endif
}
#endif
// Check that the decoded resolution is valid
assert(0 <= reduction && reduction <= max_reduction);
return decodedResolution[reduction];
}
// Compute the output frame dimensions produced by decoding an encoded frame
// at the specified resolution (full, half, quarter, or lowpass-only).
// An unrecognized resolution asserts in debug builds and falls through to
// the full resolution case in release builds.
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
int *decoded_width_out, int *decoded_height_out)
{
switch (decoded_resolution)
{
default:
assert(0);
// fallthrough: treat an unknown resolution as full resolution
case DECODED_RESOLUTION_FULL:
*decoded_width_out = encoded_width;
*decoded_height_out = encoded_height;
break;
case DECODED_RESOLUTION_HALF:
*decoded_width_out = encoded_width / 2;
*decoded_height_out = encoded_height / 2;
break;
case DECODED_RESOLUTION_QUARTER:
*decoded_width_out = encoded_width / 4;
*decoded_height_out = encoded_height / 4;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
//TODO: Check that the lowpass dimensions are correct
*decoded_width_out = encoded_width / 8;
*decoded_height_out = encoded_height / 8;
break;
}
}
// Return true if the specified resolution is supported
// Return true if the specified resolution is one the decoder supports
// (full, half, or quarter resolution).
bool IsDecodedResolution(int resolution)
{
	return (resolution == DECODED_RESOLUTION_FULL ||
			resolution == DECODED_RESOLUTION_HALF ||
			resolution == DECODED_RESOLUTION_QUARTER);
}
// Return true if the encoded sample is a key frame
// Scan the start of an encoded sample for the sample-type tag and report
// whether it marks a key frame (group header, first frame, or intra frame).
//
// sample - pointer to the encoded bitstream
// size   - size of the sample in bytes (scanning stops when exhausted)
//
// Returns false if the sample type is a non-key type, an unexpected type,
// or no sample-type tag is found within the first twenty tags.
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
bool key_frame_flag = false;
// Search the first twenty tags for the sample type
const int num_tags = 20;
int i;
BITSTREAM bitstream;
InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ);
for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE))
{
TAGVALUE segment = GetSegment(&bitstream);
if (segment.tuple.tag == CODEC_TAG_SAMPLE)
{
switch (segment.tuple.value)
{
// Sample types that begin a decodable group of frames
case SAMPLE_TYPE_GROUP:
case SAMPLE_TYPE_FIRST:
case SAMPLE_TYPE_IFRAME:
key_frame_flag = true;
break;
// Sample types that depend on earlier frames
case SAMPLE_TYPE_SEQUENCE_HEADER:
case SAMPLE_TYPE_FRAME:
case SAMPLE_TYPE_SECOND:
case SAMPLE_TYPE_PFRAME:
default:
key_frame_flag = false;
break;
case SAMPLE_TYPE_GROUP_TRAILER:
case SAMPLE_TYPE_NONE:
case SAMPLE_TYPE_ERROR:
case SAMPLE_TYPE_CHANNEL:
assert(0); // Unexpected situation
key_frame_flag = false; // Report the sample as a non-key frame
break;
}
break; // Found the sample type
}
}
return key_frame_flag;
}
// Return the number of the more recent decoded frame
// Return the number of the more recently decoded frame, or zero when no
// decoder is provided.
uint32_t DecodedFrameNumber(DECODER *decoder)
{
	// Fixed: the NULL check must run before the decoder pointer is used;
	// the original computed &decoder->codec first (UB on a null pointer)
	if (decoder == NULL) return 0;
	return decoder->codec.frame_number;
}
/***** Start of the new code for the finite state machine (FSM) decoder *****/
#if _PROCESSOR_DISPATCH
__declspec(cpu_dispatch(Pentium_4,Generic))
// Dispatch stub: the Intel compiler's cpu_dispatch mechanism replaces this
// body with a call to the Generic or Pentium_4 specialization at runtime.
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Stub routine for processor specific dispatch
}
#endif
#if _PROCESSOR_GENERIC
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Generic))
#endif
// This version assumes that the row is a multiple of 8 bytes
// Zero a row of highpass coefficients using MMX 8-byte stores.
// length is in bytes and must be a multiple of 8; at least one block
// must be written. (32-bit MSVC-style inline assembly.)
// This version assumes that the row is a multiple of 8 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
//assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 8 byte blocks
assert(ISALIGNED(length, 8));
// Convert the length from pixels to 8-byte blocks
count = (length >> 3);
// This code assumes that at least one 8-byte block will be zeroed
assert(count > 0);
__asm
{
pxor mm0, mm0 // Zero a 16 byte register
mov eax, rowptr // Load the pointer to the memory block
mov ebx, count // Load the count of 8-byte blocks
loop: movq [eax], mm0 // Write 8 bytes of zeros
add eax, 8 // Advance to the next 8 byte block
sub ebx, 1 // Decrement the number of blocks
jg loop
}
// NOTE(review): MMX code normally requires an EMMS before subsequent
// floating-point use; the _mm_empty() call here is commented out —
// confirm the callers restore the FPU state elsewhere
//_mm_empty();
}
#endif
#if _PROCESSOR_PENTIUM_4
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Pentium_4))
#endif
#ifndef _WIN64
// This version assumes that the row is a multiple of 16 bytes
// Zero a row of highpass coefficients (Pentium 4 specialization, 32-bit).
// length is in bytes and must be a multiple of 16; the row must be
// 16-byte aligned. The SSE2 assembly was replaced by memset (see below).
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
// Convert the length from pixels to 16-byte blocks
count = (length >> 4);
// This code assumes that at least one 16-byte block will be zeroed
assert(count > 0);
#if 1 //DANREMOVE
// The hand-written SSE2 loop below was superseded by memset
memset(rowptr, 0, length);
#else
__asm
{
pxor xmm0, xmm0 // Zero a 16 byte register
mov eax, rowptr // Load the pointer to the memory block
mov ebx, count // Load the count of 16-byte blocks
loop: movdqa [eax], xmm0 // Write 16 bytes of zeros
add eax, 16 // Advance to the next 16 byte block
sub ebx, 1 // Decrement the number of blocks
jg loop
}
#endif
}
#else
// This version assumes that the row is a multiple of 16 bytes
// Zero a row of highpass coefficients (64-bit build: no inline assembly,
// so plain memset is used). length is in bytes, must be a multiple of 16,
// and the row must be 16-byte aligned.
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
memset(rowptr, 0, length);
}
#endif
#endif
#if (0 && _DEBUG)
// Functions for the finite state machine decoder (debug version)
// Debug-build function form of the GetFSMTableEntry macro (this whole
// region is compiled out by "#if (0 && _DEBUG)").
static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index)
{
// Return the address of the next table entry in the finite state machine
return &fsm->next_state[index];
}
// Debug-build function form of the ResetFSM macro (compiled out).
static void ResetFSM(FSM *fsm)
{
// Reset the state to the beginning of the finite state machine entries
fsm->next_state = fsm->entries;
}
// Debug-build function form of the UpdateFSM macro (compiled out).
static void UpdateFSM(FSM *fsm, int next)
{
// Change the state pointer to the next block of table entries
fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE);
}
#else
// Macros for the finite state machine decoder
// These mirror the debug-build functions above: fetch an entry at an index,
// reset the machine to its initial state, and advance to the next state.
#if _INDIVIDUAL_LUT
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next]
// Per-state lookup variants that index by state number rather than pointer
#define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm) fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next
#else
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif
#endif
#if _DEBUG
// Decode an FSM entry's fields into locals for inspection.
// NOTE(review): the locals are computed but never output — presumably this
// exists as a breakpoint target for debugger inspection; confirm before
// removing.
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = entry->value0 / 32;
int value1 = entry->value1 / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
}
// Decode a packed (fast) FSM entry's fields into locals for inspection.
// NOTE(review): locals are computed but never output — presumably a
// breakpoint target, like DebugOutputFSMEntry above.
static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = (entry->values >> 16) / 32;
int value1 = (entry->values & 0xFFFF) / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
}
// Walk the first block of FSM entries, decoding the skip fields of each.
// NOTE(review): nothing is printed or returned — presumably a breakpoint
// target for debugger inspection.
static void DebugOutputFSM(FSM *fsm)
{
int num_entries = FSM_INDEX_ENTRIES;
int i;
for (i = 0; i < num_entries; i++)
{
FSMENTRY *entry = &fsm->table.entries[0][i];
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
}
}
// Write one decoded FSM entry to the logfile as
// "index, value0, value1, pre_skip, post_skip". Safe to call with a
// NULL logfile (nothing is written).
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = entry->value0 / 32;
int value1 = entry->value1 / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
if (logfile) {
fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
}
}
// Log one fast-format FSM table entry to the logfile as a CSV line.
// The fast format packs both magnitudes into the single 'values' word
// (first magnitude in the high 16 bits, second in the low 16 bits).
// Does nothing when logfile is NULL.
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
    // Unpack the packed skip counts (low 12 bits = pre, high bits = post)
    const int skip_before = entry->pre_post_skip & 0xFFF;
    const int skip_after = entry->pre_post_skip >> 12;
    // Split the packed magnitudes and undo the companding
    const int magnitude0 = (entry->values >> 16) / 32;
    const int magnitude1 = (entry->values & 0xFFFF) / 32;
    // Express the index relative to the beginning of the whole table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
    if (logfile != NULL)
    {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, magnitude0, magnitude1, skip_before, skip_after);
    }
}
#endif
// Read the next byte from the bitstream without bit-level buffering.
// This is an inlined copy of the byte-aligned (third) case of GetByte.
static inline int GetFastByte(BITSTREAM *stream)
{
    // Fetch the byte at the current read position
    uint8_t *position = stream->lpCurrentWord;
    int result = (uint32_t)position[0];

    // Advance the bitstream past the byte just consumed
    stream->lpCurrentWord = position + 1;

#if ERROR_TOLERANT
    // Track how many bytes remain so truncated streams can be detected
    stream->nWordsUsed--;
#endif

    // Only the low eight bits may be set
    assert((result & ~BITMASK(8)) == 0);

    return result;
}
#if 0
// Read the next two bytes from the bitstream as a big-endian 16-bit value.
// Adaptation of the byte-aligned case of GetByte (see GetFastByte above).
static inline int GetFastShort(BITSTREAM *stream)
{
    uint8_t *lpCurrentWord = stream->lpCurrentWord;

    // Assemble the 16-bit value most significant byte first
    int byte = (uint32_t)(lpCurrentWord[0]);
    int word = (byte << 8) | (uint32_t)(lpCurrentWord[1]);

    // Update the state of the bitstream
    stream->lpCurrentWord = lpCurrentWord + 2;

#if ERROR_TOLERANT
    // Fix: keep the remaining-byte count in step with GetFastByte, which
    // decrements once per byte consumed; this routine consumes two bytes
    stream->nWordsUsed -= 2;
#endif

    // Check that the high bits are zero
    assert((word & ~BITMASK(16)) == 0);

    return word;
}
#endif
// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);
#if 0
// Read the next 32-bit word from the bitstream, converting from the
// big-endian byte order used in the stream to native byte order.
// NOTE(review): assumes lpCurrentWord is suitably aligned for a 32-bit
// load at this point -- confirm against the callers.
static inline int GetFastLong(BITSTREAM *stream)
{
    uint32_t *wordptr = (uint32_t *)stream->lpCurrentWord;

    // Load the word and convert big-endian to native byte order
    int result = SwapInt32BtoN(*wordptr);

    // Advance the bitstream past the four bytes just consumed
    stream->lpCurrentWord = (uint8_t *)(wordptr + 1);

    return result;
}
#endif
#if 0 //DAN20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
// Decode one highpass subband with the finite state machine decoder.
//
// Parameters:
//   fsm          - finite state machine holding the Huffman decoding tables
//   stream       - input bitstream (must be byte aligned on entry)
//   image        - destination subband of PIXEL coefficients
//   width/height - subband dimensions in pixels
//   pitch        - row pitch in bytes (converted to pixels below)
//   quantization - multiplier that undoes quantization of decoded magnitudes
//
// Returns true when the band end trailer has been decoded; rows beyond the
// last decoded coefficient are zeroed.
bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL *rowptr = image;
    int column = 0;
    int32_t value;
    size_t bytes_row_size = width * sizeof(PIXEL);  // NOTE(review): unused
    PIXEL *maxptr;
    int length = width * sizeof(PIXEL);
    //ROI roi = {width, 1};

    // This version of Huffman decoder assumes that one byte
    // is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Convert the pitch to units of pixels
    pitch /= sizeof(PIXEL);

    // Compute the address of the row after the last row in the band
    maxptr = rowptr + height * pitch;

    // Round up the row length (in bytes) to a multiple of 16 bytes
    length = ALIGN16(length);

#if (0 && DEBUG)
    zerorow_count = 0;
#endif

    // Rows are zeroed before coefficients are scattered into them
    ZeroHighPassRow(rowptr, length);

    // Decode runs and magnitude values until the band end trailer is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return when the entire band is decoded
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;
            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there is only one decoded magnitude value
        else if(entry->value1 == 0) {
            // Undo quantization and scaling
            value = quantization * entry->value0;
            column += entry->pre_skip;
            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
            // Fill in the decoded magnitude
            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);
            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);
            column += entry->post_skip;
            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);
            if(column < width-1) {
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // Pair straddles a row boundary: first value ends this row,
                // second value starts the next row
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                // NOTE(review): this store is not guarded by rowptr < maxptr;
                // presumably the encoder never emits a pair that wraps past
                // the last row -- confirm, otherwise this can write out of
                // bounds
                rowptr[column++] = SATURATE(value);
            }
        }

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER) {
            // Zero out the whole subband from here on
            rowptr += pitch;
            while(rowptr < maxptr) {
                ZeroHighPassRow(rowptr, length);
                rowptr += pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;
            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there is only one decoded magnitude value
        else if (entry->value1 == 0) {
            // Undo quantization and scaling
            int32_t value = quantization * entry->value0;
            column += entry->pre_skip;
            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros
            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
            // Fill in the decoded magnitude
            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);
            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);
            column += entry->post_skip;
            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;
                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
            }
        }
        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);
            if(column < width-1) {
                value = quantization * entry->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // Pair straddles a row boundary (see note in the first chunk)
                value = quantization * entry->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }
        }
    }
}
#endif
// Decode a subband of highpass coefficients using a finite state machine.
// One byte is read from the bitstream each time and decoded in two steps.
// New version that uses a buffer aligned to the cache for decoding.
#if 0
// Zero a block of 'numCacheLines' cache lines starting at ptrCacheLines.
// The pointer must be cache-line aligned and the count must be positive.
// On non-GNU compilers this uses SSE2 aligned stores (x86 MSVC inline asm).
static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines)
{
    // This routine assumes that the cache line size is 64 bytes
    assert(_CACHE_LINE_SIZE == 64);

    // This routine assumes that the input pointer is aligned to a cache line
    assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE));

    // This routine assumes that at least one cache line will be written
    assert(numCacheLines > 0);

#if __GNUC__
    memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE);
#else
    __asm
    {
        pxor xmm0, xmm0             // Zero a 16 byte register
        mov eax, ptrCacheLines      // Load the pointer to the memory block
        mov ebx, numCacheLines      // Load the count of the number of cache lines
        loop: movdqa [eax], xmm0    // Write 64 bytes of zeros using aligned stores
        movdqa [eax+16], xmm0
        movdqa [eax+32], xmm0
        movdqa [eax+48], xmm0
        add eax, 64                 // Advance to the next cache line
        sub ebx, 1                  // Decrement the number of cache lines
        jg loop
    }
#endif
    // The routine returns the pointer to the cache line after zeroing the block
}
#endif
#if 0
// Copy 'length' bytes (a whole row) from the decoding buffer to the output
// row.  The length must be a multiple of 16 bytes.  On non-GNU compilers
// this uses SSE2 non-temporal stores to avoid polluting the cache with
// output rows that will not be read again soon.
static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length)
{
    // Note that the length is in units of bytes (not pixels)
    int count;      // Number of 16-byte blocks to copy

    // Check that the row length is an integer multiple of 16-byte blocks
    assert(ISALIGNED(length, 16));

    // Convert the row length to the number of 16-byte blocks to copy
    count = length >> 4;

    // This routine assumes that at least one 16 byte block will be copied
    assert(count > 0);

#if __GNUC__
    // Use standard memory copy
    memcpy(rowptr, buffer, length);
#else
    // Copy a multiple of 16 byte blocks
    __asm
    {
        mov eax, rowptr             // Load the pointer to the destination
        mov ebx, buffer             // Load the pointer to the source
        mov ecx, count              // Load the number of 16-byte blocks to copy
        loop: movdqa xmm0, [ebx]    // Load 16 bytes from the source
        movntdq [eax], xmm0         // Copy 16 bytes to the destination
        add eax, 16                 // Advance to the group of 16 bytes
        add ebx, 16
        sub ecx, 1                  // Decrement the number of blocks to copy
        jg loop
    }
#endif
}
#endif
// DecodeBandFSMBuffered is no longer used
#if 0 //dan20041030 not used
// Decode one highpass subband with the finite state machine decoder,
// scattering coefficients into a cache-aligned scratch row buffer and
// copying each completed row to the image with streaming stores.
//
// Parameters:
//   fsm                  - FSM holding the Huffman decoding tables
//   stream               - input bitstream (must be byte aligned on entry)
//   image                - destination subband of PIXEL coefficients
//   width/height/pitch   - subband dimensions (pitch in bytes; rows are
//                          assumed contiguous, pitch == ALIGN(row, 16))
//   quantization         - multiplier that undoes quantization
//   decoding_buffer      - cache-line aligned scratch buffer for one row
//   decoding_buffer_size - size of the scratch buffer in bytes
//
// Returns true when the band end trailer has been decoded; remaining rows
// are zeroed.
bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image,
                           int width, int height, int pitch,
                           int quantization, char *decoding_buffer, size_t decoding_buffer_size)
{
    char *rowptr = (char *)image;               // Pointer to current row
    char *maxptr = rowptr + height * pitch;     // Address of row after the last row
    FSMENTRY *entry;
    int index;
    int byte;
    int column = 0;
    int32_t value;
    size_t row_size;
    size_t cache_row_size;      // Size of a row in bytes
    int cache_line_count;       // Size of the buffer in cache lines
    PIXEL *buffer;              // Pixel pointer to the buffer
    int length;                 // Length of row in bytes

    // Check that the processing size allows two chunks per byte
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);

    // The bitstream buffer should be empty
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Compute the number of cache lines used in the buffer
    row_size = width * sizeof(PIXEL);
    cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
    cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT);

    // Check that the buffer is large enough
    assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE));

    // This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes
    length = pitch;
    assert(length == ALIGN(row_size, 16));

    // Cast the buffer pointer for pixel access
    buffer = (PIXEL *)decoding_buffer;

    // Zero the decoding buffer
    ZeroHighPassBuffer(buffer, cache_line_count);

    // Decode runs and magnitude values until the band end trailer is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return when the entire band is decoded
        if (entry->value0 == BAND_END_TRAILER)
        {
            // Copy the buffer to the row if not already beyond the band
            if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);

            // Advance to the next row
            rowptr += pitch;

            // Zero the remaining rows in the subband
            while (rowptr < maxptr) {
                ZeroHighPassRow((PIXEL *)rowptr, length);
                rowptr += pitch;
            }

            // Reset the finite state machine to the root node in the Huffman tree
            ResetFSM(fsm);

            // Return indication that the band was fully decoded
            return true;
        }

        // Set the finite state machine to the next state in the Huffman tree
        UpdateFSM(fsm, entry->next_state);

        // No magnitude values decoded?
        if (entry->value0 == 0)
        {
            // No magnitudes decoded so just advance the column pointer
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }
        }
        // Only one magnitude value decoded?
        else if (entry->value1 == 0)
        {
            // Process the magnitude value that was decoded

            // Undo quantization and scaling
            value = quantization * entry->value0;

            // Advance to the column where the value should be placed
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            buffer[column] = SATURATE(value);
            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }
        }
        else // Two magnitude values were decoded
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if (column < width - 1) {
                // Dequantize and store the first value
                value = quantization * entry->value0;
                buffer[column++] = SATURATE(value);

                // Dequantize and store the second value
                value = quantization * entry->value1;
                buffer[column++] = SATURATE(value);
            }
            else {
                // Pair straddles a row boundary: first value ends this row,
                // second value starts the next row in the (re-zeroed) buffer

                // Dequantize and store the first value in the current row
                value = quantization * entry->value0;
                buffer[column] = SATURATE(value);

                // Dequantize the second value
                value = quantization * entry->value1;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);

                // Reset the column to the beginning of the row
                column = 0;

                // Store the second value in the new row
                buffer[column++] = SATURATE(value);
            }
        }

        // Decode the second 4-bit chunk
        index = byte & FSM_INDEX_MASK;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            // Copy the buffer to the row if not already beyond the band
            if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);

            // Advance to the next row
            rowptr += pitch;

            // Zero the remaining rows in the subband
            while (rowptr < maxptr) {
                ZeroHighPassRow((PIXEL *)rowptr, length);
                rowptr += pitch;
            }

            // Reset the finite state machine to the root node in the Huffman tree
            ResetFSM(fsm);

            // Return indication that the band was fully decoded
            return true;
        }

        // Set the finite state machine to the next state in the Huffman tree
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0) {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }
        }
        // If there is only one decoded magnitude value
        else if (entry->value1 == 0) {
            // Undo quantization and scaling
            int32_t value = quantization * entry->value0;
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            buffer[column] = SATURATE(value);
            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
            }
        }
        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if (column < width-1) {
                value = quantization * entry->value0;
                buffer[column++] = SATURATE(value);
                value = quantization * entry->value1;
                buffer[column++] = SATURATE(value);
            }
            else {
                // Pair straddles a row boundary (see note in the first chunk)
                value = quantization * entry->value0;
                buffer[column] = SATURATE(value);
                value = quantization * entry->value1;

                // Advance to the next row
                assert(rowptr < maxptr);
                CopyRowBuffer(rowptr, buffer, length);
                rowptr += pitch;

                // Zero the decoding buffer if there are more rows to process
                if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);

                // Reset the column to the beginning of the row
                column = 0;
                buffer[column++] = SATURATE(value);
            }
        }
    }
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM, combine the two results decoded from one byte
// Decode one highpass subband with the finite state machine decoder,
// reading a byte at a time and combining the results of the two 4-bit
// decoding steps so the branch structure handles both lookups together.
//
// Parameters mirror DecodeBandFSM: fsm (Huffman tables), stream (byte
// aligned bitstream), image/width/height/pitch (destination subband),
// quantization (dequantization multiplier).  Returns true when the band
// end trailer is decoded.
//
// NOTE(review): unlike its siblings, this version passes 'width' (pixels)
// to ZeroHighPassRow where the other decoders pass a byte length rounded
// up to 16 bytes -- confirm the expected units of ZeroHighPassRow's second
// argument; the locals 'skip', 'row', and 'bytes_row_size' are unused.
bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
    int index, skip;
    uint8_t byte;
    FSMENTRY *entry1, *entry2;
    PIXEL *rowptr = image;
    int row = 0, column = 0;
    int32_t value,bytes_row_size = width*sizeof(PIXEL);
    PIXEL *maxptr = rowptr + height*pitch;

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);

    ZeroHighPassRow(rowptr, width);

    // Double check that the bitstream buffer is empty
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Decode runs and magnitude values until the band end trailer is decoded
    for (;;)
    {
        // Read a byte from the bitstream
        //byte = GetBits(stream, BITSTREAM_WORD_SIZE);
#if 0
        byte = GetByte(stream);
        if (stream->error != BITSTREAM_ERROR_OKAY) {
            stream->error = VLC_ERROR_NOTFOUND;
            return false;
        }
#else
        // Inline of the third case of GetByte
        uint8_t *lpCurrentWord = stream->lpCurrentWord;

        // Get the next byte from the bitstream
        byte = (uint32_t )(*(lpCurrentWord++));

        // Update the state of the bitstream
        stream->lpCurrentWord = lpCurrentWord;

        // Check that the high bits are zero
        assert((byte & ~BITMASK(8)) == 0);
#endif

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;
        entry1 = GetFSMTableEntry(fsm, index);
        UpdateFSM(fsm, entry1->next_state);

        // decode the second 4-bit chunk
        index = byte & ((1<<FSM_INDEX_SIZE)-1);
        entry2 = GetFSMTableEntry(fsm, index);
        UpdateFSM(fsm, entry2->next_state);

        // Return when the subband is completely decoded
        if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) {
            ResetFSM(fsm);
            return true;
        }

        // If no magnitude value is decoded at the first step
        if (entry1->value0 == 0) {
            // If no magnitude is decoded at the second step
            if(entry2->value0 == 0) {
                // Combine the two runs of zeros into one skip
                column += entry1->pre_skip+entry2->pre_skip;

                // Did the scan go beyond the end of the row?
                while (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
            // If one magnitude is decoded at the second step
            else if(entry2->value1 == 0) {
                // Skip to the non-zero position
                column += entry1->pre_skip+entry2->pre_skip;

                // Did the scan go beyond the end of the row?
                while (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }

                // Fill in the decoded magnitude

                // Undo quantization and scaling
                value = quantization * entry2->value0;

                // Check the column before storing the value
                //assert(index < width);
                assert(0 <= column && column < width);

                // Store the saturated value
                rowptr[column] = SATURATE(value);
                column += entry2->post_skip;

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
            // If two magnitudes are decoded at the second step
            else {
                column += entry1->pre_skip;

                // Did the scan go beyond the end of the row?
                while (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }

                // Check the column before storing values
                assert(0 <= column && column < width);

                if(column < width-1) {
                    value = quantization * entry2->value0;
                    rowptr[column++] = SATURATE(value);
                    value = quantization * entry2->value1;
                    rowptr[column++] = SATURATE(value);
                }
                else {
                    // Pair straddles a row boundary
                    value = quantization * entry2->value0;
                    rowptr[column] = SATURATE(value);
                    value = quantization * entry2->value1;
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                    column = 0;
                    rowptr[column++] = SATURATE(value);
                }
            }
        }
        // If only one magnitude is decoded at the first step
        else if(entry1->value1 == 0) {
            // Undo quantization and scaling
            value = quantization * entry1->value0;
            column += entry1->pre_skip;

            // Did the scan go beyond the end of the row?
            while (column >= width)
            {
                // Compute the starting column for the next row
                column -= width;

                // Advance to the next row
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < width);

            // Store the saturated value at the position found in the scan
            rowptr[column] = SATURATE(value);

            // If no magnitude is decoded at the second step
            if(entry2->value0 == 0) {
                column += entry1->post_skip+entry2->pre_skip;

                // Did the scan go beyond the end of the row?
                while (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
            // If one magnitude is decoded at the second step
            else if (entry2->value1 == 0)
            {
                // Undo quantization and scaling
                value = quantization * entry2->value0;
                column += entry1->post_skip+entry2->pre_skip;

                // Did the scan go beyond the end of the row?
                while (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }

                // Fill in the decoded magnitude

                // Check the column before storing the value
                assert(0 <= column && column < width);

                // Store the saturated value at the position found in the scan
                rowptr[column] = SATURATE(value);
                column += entry2->post_skip;

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
            // If two magnitudes are decoded at the second step
            else
            {
                column += entry1->post_skip;

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }

                // Check the column before storing values
                assert(0 <= column && column < width);

                if(column < width-1) {
                    value = quantization * entry2->value0;
                    rowptr[column++] = SATURATE(value);
                    value = quantization * entry2->value1;
                    rowptr[column++] = SATURATE(value);
                }
                else {
                    // Pair straddles a row boundary
                    value = quantization * entry2->value0;
                    rowptr[column] = SATURATE(value);
                    value = quantization * entry2->value1;
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                    column = 0;
                    rowptr[column++] = SATURATE(value);
                }
            }
        }
        // If two magnitudes are decoded at the first step
        else {
            // Check the column before storing values
            assert(0 <= column && column < width);

            if(column < width-1) {
                value = quantization * entry1->value0;
                rowptr[column++] = SATURATE(value);
                value = quantization * entry1->value1;
                rowptr[column++] = SATURATE(value);
            }
            else {
                // Pair straddles a row boundary
                value = quantization * entry1->value0;
                rowptr[column] = SATURATE(value);
                value = quantization * entry1->value1;
                rowptr += pitch;
                if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                column = 0;
                rowptr[column++] = SATURATE(value);
            }

            // If two magnitudes are decoded at the first step
            // then at most one more magnitude can be decoded at the second step
            assert(entry2->value1 == 0);

            // If no magnitude is decoded at the second step
            if(entry2->value0 == 0) {
                column += entry2->pre_skip;     // entry2->pre_skip <=4 must be true

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
            // If one magnitude is decoded at the second step
            else {
                column += entry2->pre_skip;     // must be a small zero run

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }

                // Fill in the decoded magnitude

                // Undo quantization and scaling
                value = quantization * entry2->value0;

                // Check the column before storing the value
                assert(0 <= column && column < width);

                // Store the saturated value at the position found in the scan
                rowptr[column] = SATURATE(value);
                column += entry2->post_skip;

                // Did the scan go beyond the end of the row?
                if (column >= width)
                {
                    // Compute the starting column for the next row
                    column -= width;

                    // Advance to the next row
                    rowptr += pitch;
                    if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
                }
            }
        }
    }
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
int column = 0;
int32_t value;
PIXEL8S *maxptr;
int length = width * sizeof(PIXEL8S);
//ROI roi = {width, 1};
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
// Compute the address of the row after the last row in the band
maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
length = ALIGN16(length);
ZeroHighPassRow((PIXEL *)rowptr, length);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if(entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
}
}
#endif
// same as DecodeBandFSM8sNoGap but output to 16bit data
// Two-pass FSM run-length decoder for a 16-bit subband.
//
// The band is decoded twice from the bitstream: the first pass stores the
// low-order part of each decoded magnitude directly into the band, then the
// stream is re-aligned and the second pass ORs the next set of decoded
// values into the high byte (value << 8) of the same coefficients.
//
// pitch is in bytes on entry and converted below to units of PIXEL16S.
// Returns false only when image is NULL; otherwise returns true.
//
// NOTE(review): the 'quant' parameter is never used in this routine --
// confirm dequantization happens elsewhere (e.g. DeQuantFSM).
// NOTE(review): 'rowptr' is declared PIXEL* but is also assigned a
// PIXEL16S* at SecondPass; this assumes PIXEL and PIXEL16S have the same
// size -- verify the typedefs.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL *rowptr = (PIXEL *)image;
PIXEL16S *bandendptr;
int value;
#if ERROR_TOLERANT
// Remember the stream position so a damaged band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#endif
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
if (image == NULL) {
return false;
}
// Reset the decoder
ResetFSM(fsm);
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// First pass: decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
// Copy the entry into an aligned local buffer before use
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Jump to the second pass if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs (pre-skip is packed in the low 12 bits)
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (low part; no saturation here)
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance (post-skip is packed in the high bits)
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Jump to the second pass if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
SecondPass:
// Restart at the top of the band for the second (high byte) pass
rowptr = (PIXEL16S *)image;
AlignBits(stream);
AlignBitsTag(stream);
// Skip 4 bytes -- presumably the band trailer tag between the two passes
// NOTE(review): confirm against the encoder's two-pass band layout
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;
// Second pass: decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached when the stream ran out before the band end trailer:
// reset the decoder and skip over the damaged subband.
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Same as DecodeBandFSM8sNoGap but output to 16bit data
// Decode one 16-bit subband using the FSM run-length decoder.
//
// The gap between width and pitch is assumed to have been encoded as zero
// runs, so the band is treated as one long row and no per-row boundary
// checks are needed.  Decoding runs in two phases:
//   1. A fast loop that reads bytes straight from the stream buffer and
//      writes two coefficients per FSM entry with a single 32-bit store.
//      It performs NO band-end-trailer check, so it stops 500 coefficients
//      before the end of the band (presumably enough margin that the
//      trailer is always consumed by the safe loop -- TODO confirm).
//   2. A safe loop using GetFastByte() that checks for BAND_END_TRAILER
//      and writes coefficients individually.
//
// NOTE(review): the fast loop's 32-bit store through (uint32_t *)rowptr
// assumes the destination is suitably aligned for a 4-byte write (rowptr
// advanced by even skip counts) -- verify against the FSM table generator.
// pitch is in bytes on entry; returns true (errors are handled by skipping
// the subband when ERROR_TOLERANT is enabled).
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch)
#endif
{
int index, byte;
FSMENTRY *entry;
FSMENTRYFAST *entryfast;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
// Remember the stream position so a damaged band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
ptrdiff_t offset;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
#if (0 && DEBUG)
DebugOutputBitstreamPosition(stream);
DebugOutputBitstreamBytes(stream, 16);
#endif
// Reset the decoder
ResetFSM(fsm);
#if (0 && DEBUG)
DebugOutputFSM(fsm);
#endif
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
//memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Stop the fast loop 500 coefficients before the end of the band
fastendptr = bandendptr;
fastendptr -= 500;
// Fast phase: decode runs and magnitude values without trailer checks
while(rowptr < fastendptr)
{
// Read a byte directly from the bitstream buffer
byte = *CurrentWord++;
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// Set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs (pre-skip packed in the low 12 bits)
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Write both decoded magnitudes with one 32-bit store
*((uint32_t *)rowptr) = entryfast->values;
// Skip the appropriate distance (post-skip packed in the high bits)
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Write both decoded magnitudes with one 32-bit store
*((uint32_t *)rowptr) = entryfast->values;
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
}
// Reconcile the stream state with the bytes consumed by the fast loop
offset = CurrentWord - startCurrentWord;
stream->lpCurrentWord += offset;
stream->nWordsUsed -= (int)offset;
// Safe phase: decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while(bandendptr >= rowptr)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
#if (0 && DEBUG)
PrintBitstreamPosition(stream, logfile);
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (only nonzero values)
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached when the stream ran out before the band end trailer
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Decode a 16-bit subband with the FSM decoder, substituting out-of-band
// "peak" values for coefficients whose coded magnitude exceeds 'level'.
//
// When |value0| > level the next entry from the peaks table (divided by
// quant) is written instead of the coded magnitude.  The FSM tables here
// are NOT pre-quantized, so 'level' is scaled down by quant first.
// NOTE(review): only value0 is compared against the peak level; value1 is
// always stored as coded -- confirm this matches the encoder's peak rules.
//
// Structure follows DecodeBandFSM16sNoGap: a fast loop (no trailer check,
// stops 1000 coefficients early) followed by a safe loop with trailer
// checks; on stream exhaustion (ERROR_TOLERANT) the subband is skipped.
bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
// Remember the stream position so a damaged band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
// Reset the decoder
ResetFSM(fsm);
//This is been called with non-prequantized FSM
if(quant>1) level /= quant;
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Stop the fast loop 1000 coefficients before the end of the band
fastendptr = bandendptr;
fastendptr -= 1000;
// Fast phase: decode runs and magnitude values without trailer checks
while(rowptr < fastendptr)
{
// Read a byte directly from the bitstream buffer
byte = *CurrentWord++;
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs (pre-skip packed in the low 12 bits)
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude, substituting a peak value
// when the coded magnitude exceeds the peak level
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance (post-skip packed in the high bits)
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (peak substitution as above)
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
// Reconcile the stream state with the bytes consumed by the fast loop
stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));
// Safe phase: decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (peak substitution as above)
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (peak substitution as above)
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached when the stream ran out before the band end trailer
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as
// zero runs. Therefore decoded magnitude values can be written down without the need to check
// if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM
// can be significantly reduced.
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
#if !_INDIVIDUAL_ENTRY
#if 0 //dan20041030 not used
// Decode an 8-bit subband with the FSM run-length decoder.
//
// The width/pitch gap is assumed to have been encoded as zero runs, so the
// band is treated as one long row and no per-row boundary checks are made.
// Each bitstream byte is split into two 4-bit chunks; every FSM entry
// yields a zero-run skip, two saturated magnitudes, and a trailing skip.
// Returns true when the band end trailer is decoded.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
PIXEL8S *outptr = image;
PIXEL8S *band_limit;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY local_entry;
#endif
pitch /= sizeof(PIXEL8S);
// Clear the whole subband before decoding the sparse coefficients
ZeroHighPassRow((PIXEL *)outptr, pitch*height);
// One bitstream byte must hold exactly two FSM index chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
band_limit = outptr + height * pitch;
// Decode runs and magnitude values until the band end trailer appears
for (;;)
{
int word = GetFastByte(stream);
int chunk[2];
int half;
// Split the byte into its high and low 4-bit chunks
chunk[0] = word >> FSM_INDEX_SIZE;
chunk[1] = word & ((1<<FSM_INDEX_SIZE)-1);
for (half = 0; half < 2; half++)
{
int32_t coeff;
// Look up the table entry for this chunk in the current state
FSMENTRY *entry = GetFSMTableEntry(fsm, chunk[half]);
#if _FSMBUFFER
// Copy the entry into an aligned local buffer before use
memcpy(&local_entry, entry, sizeof(FSMENTRY));
entry = &local_entry;
#endif
// The trailer marks the end of the subband
if (entry->value0 == BAND_END_TRAILER)
{
assert(outptr <= band_limit);
ResetFSM(fsm);
return true;
}
// Advance the machine to its next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip over the run of zeros preceding the coefficients
outptr = &outptr[entry->pre_skip];
// Store both decoded magnitudes with saturation
coeff = entry->value0;
outptr[0] = SATURATE(coeff);
coeff = entry->value1;
outptr[1] = SATURATE(coeff);
// Skip past the coefficients that were just written
outptr = &outptr[entry->post_skip];
}
}
}
#endif
#elif _SINGLE_FSM_TABLE
// Decode an 8-bit subband with the FSM run-length decoder, using a single
// flat FSM table (_SINGLE_FSM_TABLE build).  The width/pitch gap is assumed
// to have been encoded as zero runs, so the band is treated as one long row.
// Returns true when the band end trailer is decoded.
// NOTE(review): the only exit is the BAND_END_TRAILER entry; a corrupt
// stream with no trailer would keep reading from the bitstream.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte, i;
FSMENTRY *entry,*firstentry = fsm->table->firstentry;
PIXEL8S *rowptr = image;
PIXEL8S *bandendptr;
int32_t value;
pitch /= sizeof(PIXEL8S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
// Fix: compute the end of the band so the asserts below test a defined
// value (bandendptr was previously read uninitialized -- undefined behavior)
bandendptr = rowptr + height * pitch;
// The Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Decode runs and magnitude values until the entire band is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the flat lookup table at the current state
i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
entry = firstentry+i; //DAN
// Return if the subband is decoded completely
if(entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->post_skip];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the flat lookup table at the current state
i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
entry = firstentry+i; //DAN
// Return if the subband is decoded completely
if(entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->post_skip];
}
}
#else
// Decode an 8-bit subband with the FSM run-length decoder, using
// per-state individual table entries (_INDIVIDUAL_ENTRY build).
// The width/pitch gap is assumed to have been encoded as zero runs, so the
// band is treated as one long row; a NULL entry marks the end of the band.
// Returns true when the band is decoded completely.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
PIXEL8S *bandendptr;
int32_t value;
// (removed an unused, MSVC-only '__declspec(align(4)) FSMENTRY buffer'
// local that was never referenced)
pitch /= sizeof(PIXEL8S);
// zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
// The Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
// Decode runs and magnitude values until the entire band is decoded
for (;;)
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntryIndividual(fsm, index);
// A NULL entry signals that the subband is decoded completely
if(entry == NULL)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->post_skip];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntryIndividual(fsm, index);
// A NULL entry signals that the subband is decoded completely
if (entry == NULL)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->post_skip];
}
}
#endif
// Decode the highpass band coefficients but do not write them out - used in SIF mode
// Walk the FSM over a highpass band's bitstream without writing any
// coefficients -- used in SIF mode to consume a band that is not needed.
// The image/width/height arguments are accepted for interface parity with
// the decoding variants but are not used.  Returns true when the band end
// trailer is reached.
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int chunk[2];
int half;
FSMENTRY *state_entry;
pitch /= sizeof(PIXEL8S);
// The Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Step the state machine through the band until the trailer is decoded
for (;;)
{
// Fetch the next byte and split it into its two index chunks
int word = GetFastByte(stream);
chunk[0] = word >> FSM_INDEX_SIZE;
chunk[1] = word & ((1<<FSM_INDEX_SIZE)-1);
for (half = 0; half < 2; half++)
{
// Look up the entry for this chunk in the current state
state_entry = GetFSMTableEntry(fsm, chunk[half]);
// The trailer marks the end of the subband
if (state_entry->value0 == BAND_END_TRAILER) {
ResetFSM(fsm);
return true;
}
// Advance to the next state without emitting output
UpdateFSM(fsm, (int)state_entry->next_state);
}
}
}
#if _TIMING
extern TIMER tk_fastruns;
#endif
#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
// Decode the coefficient runs of one 8-bit wavelet band using the finite
// state machine decoder (thin wrapper around DecodeBandFSM8sNoGap).
// All rows are treated as one long row covering the entire band; the FSM
// does not support end-of-row markers.  Returns false and sets
// decoder->error on failure, true on success.
// (Cleanup: removed unused locals 'error' and 'row' and the orphan 'end:'
// label that had no corresponding goto.)
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
FILE *logfile = decoder->logfile;
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *rowptr;
int pitch;
int pixel_type = wavelet->pixel_type[band_index];
decoder->codec.active_codebook = 0; // reset CODEC state
// Must have a valid wavelet
assert(wavelet != NULL);
if (wavelet == NULL) return false;
//Must have a valid FSM
assert(fsm != NULL);
if(fsm == NULL) return false;
// An empty FSM table means the codebook was never initialized
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check if the band is intended for 8-bit pixels
assert(pixel_type == PIXEL_TYPE_8S);
START(tk_fastruns);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch8s; // Use the 8-bit pitch
//pitch = wavelet->pitch;
// The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
assert(0);
#endif
// Get one byte from the bitstream and decode 4 bits at a time
result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index);
DumpWaveletRow(wavelet, band_index, 0, logfile);
}
#endif
STOP(tk_fastruns);
return true;
}
#endif
#if _DEQUANTIZE_IN_FSM
// Restore the magnitude values in every FSM table entry from the saved
// unquantized copy in fsm->restoreFSM.  Called by DeQuantFSM before a new
// quantization factor is applied to tables already scaled by an old one.
// NOTE(review): the 'quant' argument is not referenced -- the restore
// array already holds the original magnitudes; confirm the parameter is
// kept only for call-site symmetry with DeQuantFSM.
void ReQuantFSM(FSM *fsm, int quant)
{
int count = 0;
int i, j;
short *restore = &fsm->restoreFSM[0];
#if !_INDIVIDUAL_ENTRY
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
entry[j].value0 = restore[count++];
entry[j].value1 = restore[count++];
}
}
#else
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
// Fixed: was 'fsm_table.entries_ind[i]', which does not compile in the
// _INDIVIDUAL_ENTRY configuration (compare DeQuantFSM below)
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
entry->value0 = restore[count++];
entry->value1 = restore[count++];
}
}
#endif
}
// Scale the magnitude values stored in the FSM tables by 'quant' so that
// dequantization happens for free during decoding (_DEQUANTIZE_IN_FSM).
//
// On first use the unscaled values are saved to fsm->restoreFSM; if the
// tables were already scaled by a different factor they are restored via
// ReQuantFSM before the new factor is applied.  A no-op when the tables
// are already scaled by 'quant'.
// NOTE(review): value0 entries >= 0x7ff0 (band end trailer and similar
// markers) are left unscaled, but value1 is always multiplied -- confirm
// marker entries never carry a meaningful value1.
void DeQuantFSM(FSM *fsm, int quant)
{
int i, j;
// Undo a previous, different scaling before applying the new one
if(fsm->LastQuant > 1 && fsm->LastQuant != quant)
{
ReQuantFSM(fsm, fsm->LastQuant);
}
else if(fsm->LastQuant == quant)
{
// Tables already scaled by this factor; nothing to do
return;
}
// Save the original (unscaled) magnitudes once so they can be restored
if(fsm->InitizedRestore == 0)
{
short *restore = &fsm->restoreFSM[0];
int count = 0;
#if !_INDIVIDUAL_ENTRY
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
restore[count++] = entry[j].value0;
restore[count++] = entry[j].value1;
}
}
#else
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
restore[count++] = entry->value0;
restore[count++] = entry->value1;
}
}
#endif
fsm->InitizedRestore = 1;
}
// Apply the quantization factor to every table entry
#if !_INDIVIDUAL_ENTRY
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
if(entry[j].value0 < 0x7ff0) // band end trailer
entry[j].value0 *= quant;
entry[j].value1 *= quant;
}
}
#else
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
if(entry->value0 < 0x7ff0) // band end trailer etc
entry->value0 *= quant;
entry->value1 *= quant;
}
}
#endif
// Remember the factor so repeated calls with the same quant are no-ops
fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
// Decode one highpass band of 16-bit coefficients using the finite state
// machine decoder, selected by the codebook currently active in the codec
// state.  The whole band is treated as one long row.
//
// When threading is enabled and the entropy worker pool has more than one
// thread, the bitstream state and band parameters are queued for a worker
// thread, and this routine only advances the bitstream past the encoded band
// (using the optional SUBBAND_SIZE chunk when present, otherwise by scanning
// for the band end marker).  Otherwise the band is decoded inline, including
// the optional peak table pass and undoing of horizontal difference coding.
//
// Returns true on success; on failure decoder->error is set to
// CODEC_ERROR_RUN_DECODE and false is returned.
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                          int band_index, int width, int height, int threading)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result = true;
    int quant = wavelet->quantization[band_index];
    int active_codebook = decoder->codec.active_codebook;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[active_codebook];
    int size;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    CODEC_STATE *codec = &decoder->codec;
    //int channel = codec->channel;
    //int subband = codec->band.subband;
    //int num_subbands = codec->num_subbands;
    //int pixel_type = wavelet->pixel_type[band_index];
    int difference_coding = decoder->codec.difference_coding;
    //int localquant = 1;
    int peaklevel = 0;
    //int peaksize = 0;
    PIXEL *peakbase = NULL;

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
                subband, decoder->codec.active_codebook, difference_coding);
    }
#endif

    // The codebook and difference coding selections apply to this band only;
    // reset them now (after caching them above) for the next subband
    decoder->codec.active_codebook = 0; // reset CODEC state
    decoder->codec.difference_coding = 0; //reset state for next subband

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if(fsm == NULL) return false;

    // All rows are treated as one long row that covers the entire band
    size = fsm->table.num_states;
    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check that the band holds 16-bit signed pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    //pitch = wavelet->pitch8s;		// Use the 8-bit pitch
    pitch = wavelet->pitch;

    // Peak table parameters for coefficients too large for the codebook
    peaklevel = codec->peak_table.level;
    peakbase = codec->peak_table.base;

#if _THREADED
    // Only use the worker threads when the pool has more than one thread
    threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
    if(threading)
    {
        decoder->entropy_worker_new.threads_used = 1;
        {
            //int start = stream->nWordsUsed;
            int end;
            struct entropy_data_new *data;
            int next_queue_num = decoder->entropy_worker_new.next_queue_num++;

            // Queue a snapshot of the bitstream and the band parameters
            // for an entropy worker thread to decode asynchronously
            data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
            memcpy(&data->stream,stream, sizeof(BITSTREAM));
            data->rowptr = rowptr;
            data->width = width;
            data->height = height;
            data->pitch = pitch;
            data->peaks = peakbase;
            data->level = peaklevel;
            data->quant = quant;
            data->wavelet = wavelet;
            data->band_index = band_index;
            data->active_codebook = active_codebook;
            data->difference_coding = difference_coding;

            // Start only a particular threadid
            if(next_queue_num == 0)
            {
                ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
                ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
            }
            else
            {	// Set the work count to the number of rows to process
                ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
            }

            // The worker will decode the queued copy of the stream; here we
            // only need to advance the live stream past the encoded band
            {
                // Peek backwards at the chunk header preceding this band to see
                // whether it was an optional SUBBAND_SIZE chunk carrying the
                // encoded size of the band.
                // NOTE(review): the byte offsets -8..-5 assume a fixed tag/value
                // layout just behind lpCurrentWord -- TODO confirm against the
                // encoder's chunk format.
                unsigned short tag = *(stream->lpCurrentWord-8) << 8;
                if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
                {
                    int chunksize;
                    int value = *(stream->lpCurrentWord-6) << 8;
                    value |= *(stream->lpCurrentWord-5);
                    tag |= *(stream->lpCurrentWord-7);
                    tag = NEG(tag);

                    // Reconstruct the 24-bit chunk size and convert to bytes
                    chunksize = value;
                    chunksize &= 0xffff;
                    chunksize += ((tag&0xff)<<16);
                    chunksize *= 4;
                    chunksize -= 8;
                    {
                        // Verify that the chunk size lands on a band end marker
                        uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
                        ptr += (chunksize>>2);
                        if(*ptr != 0x00003800) // bandend
                        {
                            // Size did not check out; fall back to a linear scan
                            goto continuesearch;
                        }
                    }

                    // Skip over the encoded band in one step
                    stream->lpCurrentWord += chunksize;
                    stream->nWordsUsed -= chunksize;
                    end = stream->nWordsUsed;
                }
                else
                {
continuesearch:
                    // Scan forward one 32-bit word at a time for the band end marker
                    while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
                    {
                        stream->lpCurrentWord += 4;
                        stream->nWordsUsed -= 4;
                    }
                    end = stream->nWordsUsed;
                }
                // NOTE(review): 'end' is computed but not used after this point
            }
        }
    }
    else
#endif // _THREADED
    {
        // Scale the FSM table by the quantization factor so dequantization
        // happens while the coefficients are decoded
        DeQuantFSM(fsm, quant);

        if (peaklevel)
        {
            result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
        }
        else
        {
#if _DEBUG
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
        }

        if(difference_coding)
        {
            // Undo horizontal difference coding by computing a running sum
            // across each row (pitch is in bytes; PIXEL is two bytes)
            int x,y;
            PIXEL *line = rowptr;
            for(y=0;y<height;y++)
            {
                for(x=1;x<width;x++)
                {
                    line[x] += line[x-1];
                }
                line += pitch/2;
            }
        }

        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
        }
    }

    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

//end:
    STOP(tk_fastruns);

    return true;
}
// Skip over one encoded band in the bitstream without storing any decoded
// coefficients.  The band must have been encoded with the codebook that is
// currently active in the codec state; the whole band is treated as one
// long row.  Returns true if the band was skipped successfully; on failure
// decoder->error is set to CODEC_ERROR_RUN_DECODE and false is returned.
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                     int band_index, int width, int height)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int status;

    // Select the finite state machine for the active codebook //DAN20041026
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];

    // All rows are treated as one long row that covers the entire band
    int num_states = fsm->table.num_states;
    PIXEL *band_ptr;
    int band_pitch;

    // The codebook selection applies to this band only; reset it now
    decoder->codec.active_codebook = 0;

    // A valid wavelet is required
    assert(wavelet != NULL);
    if (wavelet == NULL) {
        return false;
    }

    // A valid finite state machine is required
    assert(fsm != NULL);
    if (fsm == NULL) {
        return false;
    }

    // The state table must not be empty
    assert(num_states > 0);
    if (num_states == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // The band is expected to hold 8-bit pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);

    START(tk_fastruns);

    band_ptr = (PIXEL *)wavelet->band[band_index];
    band_pitch = wavelet->pitch8s;		// Use the 8-bit pitch

    // The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
    assert(0);
#endif

#if 1	// Get one byte from the bitstream and decode 4 bits at a time
    status = SkipBandFSM(fsm, stream, (PIXEL8S *)band_ptr, width, height, band_pitch);
    assert(status == true);
    if (status != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#endif

#if (0 && DEBUG && _WINDOWS)
    _CrtCheckMemory();
#endif

#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

    STOP(tk_fastruns);

    return true;
}
// The third version is also based on the finite state machine decoder with
// gaps between rows encoded as zero runs, but dequantization is performed as
// the highpass values are read from the bitstream and placed into a row buffer.
// The highpass values are not written into the wavelet highpass band.
// Eventually this routine will be merged into the routine DecodeTemporalBand8s
// since this routine contains code specific to the inverse temporal transform
// and DecodeTemporalBand8s has become a shell.
#if 0
// NOTE(review): this routine is compiled out (#if 0) and retained for
// reference only, per the comment block above.  It interleaves FSM run-length
// decoding, dequantization into a one-row buffer, and the inverse temporal
// transform, so the decoded highpass values are never written into the
// wavelet band itself.  Note that it uses the older single-FSM decoder field
// (&decoder->fsm, no codebook index) -- it predates the codebook array.
bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                         int band_index, int width, int height,
                         IMAGE *frame0, IMAGE *frame1)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    FILE *logfile = decoder->logfile;
    int result;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm;

    // All rows are treated as one long row that covers the entire band
    int size = fsm->table.num_states;

    // Lowpass temporal band (band zero of the temporal wavelet)
    PIXEL *lowpass = wavelet->band[0];
    int lowpass_pitch = wavelet->pitch;
    //PIXEL8S *rowptr;
    int row = 0;
    int pitch;
    int row_width;		// Width of the encoded row of highpass coefficients

    // Output rows for the even and odd fields of the inverse temporal transform
    PIXEL *even = frame0->band[0];
    PIXEL *odd = frame1->band[0];
    int even_pitch = frame0->pitch;
    int odd_pitch = frame1->pitch;
    int pixel_type = wavelet->pixel_type[band_index];
    int quantization = wavelet->quantization[band_index];
    PIXEL *buffer;
    size_t buffer_size;
    int index, byte;
    FSMENTRY *entry;
    int column = 0;
    int32_t value;
    int buffer_row_size;
    PIXEL *highpass;

    // Check that the wavelet into which the band will be decoded is valid
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    // Check that the finite state machine is valid
    assert(fsm != NULL);
    if (fsm == NULL) return false;

    assert(size > 0);
    if (size == 0) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check that the band was encoded using 8-bit signed coefficients
    assert(pixel_type == PIXEL_TYPE_8S);

    pitch = wavelet->pitch8s;		// Use the pitch for 8-bit packed rows

    // Get the buffer for storing one row of dequantized highpass coefficients
    buffer = (PIXEL *)decoder->buffer;
    buffer_size = decoder->buffer_size;

    // The finite state machine does not support a marker at the end of each row
    assert(RUNS_ROWEND_MARKER == 0);

    /***** Start of code included from DecodeBandFSM8s() *****/

    // Check that one byte can be processes as two 4-bit nibbles
    assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE));

    // Check that the bitstream buffer is empty
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Convert the pitch to units of pixels
    pitch /= sizeof(PIXEL8S);
    buffer_row_size = pitch * sizeof(PIXEL);
    lowpass_pitch /= sizeof(PIXEL);
    even_pitch /= sizeof(PIXEL);
    odd_pitch /= sizeof(PIXEL);

    // Compute the address of the row after the last row in the band
    //maxptr = rowptr + height * pitch;

    // Round up the row length (in bytes) to a multiple of 16 bytes
    //row_size = ALIGN16(row_size);

    // Check that the buffer is large enough to hold one row
    //assert(buffer_size >= row_size);
    assert(buffer_size >= buffer_row_size);

    // Use the buffer for the row or highpass coefficients
    highpass = buffer;

#if 1
    // The row spans the allocated width (pitch) of the band in no gap mode
    row_width = pitch;
#else
    // For debugging
    row_width = wavelet->encoded_pitch/sizeof(PIXEL8S);
#endif

    // Clear the highpass buffer before decoding the non-zero coefficients
    ZeroHighPassRow(highpass, buffer_row_size);

    // Decode zero runs and magnitude values (with appended sign bit)
    // until the marker for the band end trailer has been decoded
    for (;;)
    {
        // Read a byte from the bitstream
        byte = GetFastByte(stream);

        /***** Decode the first 4-bit nibble *****/

        // Decode the first 4-bit nibble
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return when the entire band is decoded
        if (entry->value0 == BAND_END_TRAILER)
        {
            // Dequantize the highpass coefficients
            //DequantizeBandRow(rowptr, width, quantization, highpass);

            // Apply the inverse temporal transform to the current row
            InvertTemporalRow16s(lowpass, highpass, even, odd, width);

            // Advance to the next lowpass input row
            lowpass += lowpass_pitch;

            // Advance to the next even and odd output rows
            even += even_pitch;
            odd += odd_pitch;

            // Process the rest of the subband
            // (the remaining highpass rows are implicitly all zero)
            ZeroHighPassRow(highpass, buffer_row_size);
            while (++row < height)
            {
                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0)
        {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }
        }

        // If there is only one decoded magnitude value
        else if (entry->value1 == 0)
        {
            value = entry->value0;
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            assert(0 <= column && column < row_width);

            // Dequantize the value and store it in the highpass row buffer
            highpass[column] = quantization * value;

            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }
        }

        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < row_width);

            if (column < (row_width - 1)) {
                // Store both values in the current row
                highpass[column++] = quantization * entry->value0;
                highpass[column++] = quantization * entry->value1;
            }
            else {
                // The pair straddles a row boundary: store the first value at
                // the end of this row and the second at the start of the next
                value = entry->value0;
                highpass[column] = quantization * value;
                value = entry->value1;

                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);

                column = 0;
                highpass[column++] = quantization * value;
            }
        }

        /***** Decode the second 4-bit nibble *****/

        // Decode the second 4-bit nibble
        index = byte & FSM_INDEX_MASK;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            // Dequantize the highpass coefficients
            //DequantizeBandRow(rowptr, width, quantization, highpass);

            // Apply the inverse temporal transform to the current row
            InvertTemporalRow16s(lowpass, highpass, even, odd, width);

            // Advance to the next lowpass input row
            lowpass += lowpass_pitch;

            // Advance to the next even and odd output rows
            even += even_pitch;
            odd += odd_pitch;

            // Process the rest of the subband
            ZeroHighPassRow(highpass, buffer_row_size);
            while (++row < height)
            {
                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;
            }
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // If no magnitude value is decoded
        if (entry->value0 == 0)
        {
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }
        }

        // If there is only one decoded magnitude value
        else if (entry->value1 == 0)
        {
            value = entry->value0;
            column += entry->pre_skip;

            // The run length scan can go past the end of the row if the row ends
            // with a run of zeros and the next row begins with a run of zeros

            // Did the scan go beyond the end of the row?
            while (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }

            // Fill in the decoded magnitude

            // Check the column before storing the value
            //assert(index < width);
            assert(0 <= column && column < row_width);
            highpass[column] = quantization * value;

            column += entry->post_skip;

            // Did the scan go beyond the end of the row?
            if (column >= row_width)
            {
                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Compute the starting column for the next row
                column -= row_width;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);
            }
        }

        // If there are two decoded magnitude values
        else
        {
            // Check the column before storing values
            assert(0 <= column && column < row_width);

            if (column < (row_width - 1)) {
                // Store both highpass values in the current row
                highpass[column++] = quantization * entry->value0;
                highpass[column++] = quantization * entry->value1;
            }
            else {
                // The pair straddles a row boundary (see first nibble case)
                highpass[column] = quantization * entry->value0;
                value = entry->value1;

                // Dequantize the highpass coefficients
                //DequantizeBandRow(rowptr, width, quantization, highpass);

                // Apply the inverse temporal transform to the current row
                InvertTemporalRow16s(lowpass, highpass, even, odd, width);

                // Advance to the next lowpass input row
                lowpass += lowpass_pitch;

                // Advance to the next even and odd output rows
                even += even_pitch;
                odd += odd_pitch;

                // Advance to the next row
                row++;

                // Clear the highpass buffer before decoding the non-zero coefficients
                ZeroHighPassRow(highpass, buffer_row_size);

                column = 0;
                highpass[column++] = quantization * value;
            }
        }
    }

    /***** End of the code included from DecodeBandFSM8s() *****/
    // NOTE(review): control can only leave the loop above via the return
    // statements at the band end trailer; the code below is unreachable and
    // the function has no return on this (unreachable) path.

#if 0
    assert(result == true);
    if (result != true) {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#endif

#if (0 && DEBUG && _WINDOWS)
    _CrtCheckMemory();
#endif

#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

#if 0
end:
    return true;
#endif
}
#endif
/***** End of the code for the finite state machine decoder *****/
#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
// Apply the inverse horizontal-temporal transform to one frame wavelet per
// channel and pack the interlaced output rows into the output buffer.
//
// transform      per-channel transforms holding the frame wavelets
// frame_index    which frame wavelet to invert in each channel
// num_channels   number of channels to process (at most TRANSFORM_MAX_CHANNELS)
// output         packed output frame; rows for the two fields are interleaved
// output_pitch   pitch of the output frame in bytes
// frame          dimensions and decoded format of the reconstructed frame
// scratch        scratch space used for the temporal row buffers
// chroma_offset  offset applied to chroma values during packing
// precision      CODEC_PRECISION_10BIT or CODEC_PRECISION_8BIT
//
// Fix: the 8-bit precision path previously left the local 'format' variable
// uninitialized when frame->format was neither YUYV nor UYVY (the switch had
// no default case), which is undefined behavior; a default case now asserts
// and falls back to YUYV packing.
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
                                uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                const SCRATCH *scratch, int chroma_offset, int precision)
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);

    // Two fields are interleaved in the output, so each field advances by two rows
    int field_pitch = 2 * output_pitch;

    int output_width;
    int channel;
    int row;

    PIXEL *line_buffer;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
#if DEBUG
    assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

    // The intermediate line buffer follows the temporal row buffers.
    // NOTE(review): the assertion above only covers the 2 * num_channels
    // temporal rows, not this extra buffer -- TODO confirm that the scratch
    // space is large enough for (2 * num_channels + 3) rows.
    line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        // Remember the quantization applied to each band during encoding
        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];

            // Invert the horizontal transform applied to the temporal lowpass row
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                  temporal_lowpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Invert the horizontal transform applied to the temporal highpass row
            //DAN20051004 -- possible reversiblity issue
            //InvertHorizontalRow8sBuffered //----------------------- Maybe bad
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                                  temporal_highpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;
        }

        // The output width is twice the width of the wavelet bands
        output_width = 2 * horizontal_width[0];

        if (precision == CODEC_PRECISION_10BIT)
        {
            // Invert the temporal bands from all channels and pack output pixels,
            // reducing the resolution from 10 bits to 8 bits during the inverse
            switch (frame->format)
            {
            case DECODED_FORMAT_YUYV:
                InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                 output, output_pitch, output_width, frame_width,
                                                 chroma_offset);
                break;

            case DECODED_FORMAT_UYVY:
                InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                  output, output_pitch, output_width, frame_width,
                                                  chroma_offset);
                break;

            default:
                assert(0);
                break;
            }
        }
        else	// Older code for 8-bit precision
        {
            int format;

            assert(precision == CODEC_PRECISION_8BIT);

            switch (frame->format)
            {
            case DECODED_FORMAT_YUYV:
                format = COLOR_FORMAT_YUYV;
                break;

            case DECODED_FORMAT_UYVY:
                format = COLOR_FORMAT_UYVY;
                break;

            default:
                // Unexpected decoded format: flag it in debug builds and fall
                // back to YUYV packing instead of using an uninitialized value
                assert(0);
                format = COLOR_FORMAT_YUYV;
                break;
            }

            // Invert the temporal bands from all channels and pack output pixels
            InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                        output, output_pitch, output_width, frame_width,
                                        chroma_offset, format);
        }

        // Advance to the next row in the packed output image
        output += field_pitch;
    }
}
#endif
#if _INTERLACED_WORKER_THREADS
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Pointers to the rows in the temporal wavelet for each channel
PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
uint8_t *output_row_ptr = output;
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int output_width;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Divide the buffer space between the four threads
buffer_size /= 4;
buffer += buffer_size * thread_index;
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Check that the buffer is large enough
assert((2 * num_channels * temporal_row_size) <= buffer_size);
// Allocate buffers for a single row of lowpass and highpass temporal coefficients
// and initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Compute the 8-bit pitch in units of pixels
horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
//TODO: Need to recode the buffer allocations using the scratch space API
// Divide the buffer into temporal lowpass and highpass rows
temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
output += field_pitch * (half_height - 1);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // what about middle threads?
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
*/
// Loop until all of the rows have been processed
for (;;)
{
// Wait for one row from each channel to invert the transform
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
output_row_ptr = output;
output_row_ptr += row * 2 * output_pitch;
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch*row;
horizontal_lowhigh[channel] += pitch*row;
horizontal_highlow[channel] += pitch*row;
horizontal_highhigh[channel] += pitch*row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
//PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);
// assert(0 <= row && row < half_height);
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
row, thread_index, output_row_ptr);
}
#endif
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
//int pitch8s = horizontal_pitch8s[channel];
#if (0 && DEBUG)
// Invert the horizontal transform by duplicating the lowpass pixels
InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel], horizontal_width[channel],
(PIXEL *)line_buffer);
#else
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
#endif
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
(PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
temporal_highpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Advance to the next row in each horizontal band in this channel
//horizontal_lowlow[channel] += pitch;
//horizontal_lowhigh[channel] += pitch;
//horizontal_highlow[channel] += pitch;
//horizontal_highhigh[channel] += pitch;
}
// The output width is twice the width of the wavelet bands
output_width = 2 * horizontal_width[0];
// Adjust the frame width to fill to the end of each row
//frame_width = output_pitch / 2;
if (precision == CODEC_PRECISION_10BIT)
{
// Invert the temporal bands from all channels and pack output pixels
switch (frame->format)
{
// Need to reduce the resolution from 10 bits to 8 bits during the inverse
case DECODED_FORMAT_YUYV:
InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset);
break;
case DECODED_FORMAT_UYVY:
InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset);
break;
default:
assert(0);
break;
}
}
else // Older code for 8-bit precision
{
int format;
assert(precision == CODEC_PRECISION_8BIT);
switch (frame->format)
{
case DECODED_FORMAT_YUYV:
format = COLOR_FORMAT_YUYV;
break;
case DECODED_FORMAT_UYVY:
format = COLOR_FORMAT_UYVY;
break;
}
// Invert the temporal bands from all channels and pack output pixels
InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset, format);
}
// Advance to the next row in the input transforms
//row += row_step;
// Advance to the next row in the packed output image
//output += field_pitch;
}
else
{
// No more rows to process
break;
}
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
//#if BUILD_PROSPECT
// Apply the inverse horizontal-temporal transform and output rows of luma and chroma
// Invert the horizontal-temporal (frame) transform for one GOP frame and emit
// rows of 16-bit luma and chroma.  For each of the half_height transform rows,
// the horizontal transform is inverted into two temporal rows (lowpass and
// highpass) per channel, then the temporal transform is inverted to produce
// two interlaced output rows per channel.  Scratch space supplies the two
// reusable temporal row buffers.
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
char *buffer, size_t buffer_size, int chroma_offset,
int precision)
#else
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
const SCRATCH *scratch, int chroma_offset,
int precision)
#endif
{
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
// NOTE(review): the scratch free pointer is read but never advanced or
// popped here — presumably this section is transient for the call; confirm
// against the scratch space API conventions.
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width/2;
int channel;
int row;
#if (1 && DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
assert((2 * temporal_row_size) <= buffer_size);
#endif
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (1 && DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
// NOTE(review): the quantization factors are captured here but are not
// used anywhere else in this routine (the inverse row transforms called
// below take no quantization arguments).
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
// (channel 0 is luma at full width; the chroma channels are half width)
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
// Process one row at a time from each channel
for (row = 0; row < half_height; row++)
{
#if (1 && DEBUG_ROW16U)
PIXEL16U *output_row_ptr = output_buffer;
PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
int planar_pitch[TRANSFORM_MAX_CHANNELS];
ROI strip = {luma_width, 2};
uint8_t *yuv_output = (uint8_t *)output;
uint8_t *output1 = yuv_output;
uint8_t *output2 = yuv_output + output_pitch;
#else
PIXEL16U *output_row_ptr = output;
#endif
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Half-horizontal decodes skip the inverse horizontal filter and
// pass the band data through directly
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
}
else
{
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
}
//***DEBUG***
//ZeroMemory(temporal_highpass, temporal_row_size);
//FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);
// Advance to the next row in each horizontal band in this channel
horizontal_lowlow[channel] += pitch;
horizontal_lowhigh[channel] += pitch;
horizontal_highlow[channel] += pitch;
horizontal_highhigh[channel] += pitch;
#if (1 && DEBUG_ROW16U)
// Write the rows of 16-bit pixels to a temporary buffer
planar_output[channel] = output_row_ptr;
planar_pitch[channel] = output_pitch * sizeof(PIXEL);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
planar_output[channel], planar_pitch[channel],
output_row_width[channel],
frame_width, chroma_offset, precision);
//if (channel > 0)
if (0)
{
uint8_t *output3 = (uint8_t *)planar_output[channel];
uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
int output_size = output_row_width[channel] * sizeof(PIXEL);
int fill_value = (128 << 8);
//ZeroMemory(output3, output_size);
//ZeroMemory(output4, output_size);
FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
}
#else
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
#endif
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
// Check the output row alignment
assert(ISALIGNED16(output_row_ptr));
}
// Advance to the next group of rows in the output image
// (each iteration produced two output rows, hence the field pitch)
output += field_pitch/sizeof(PIXEL16U);
}
}
//#endif
#if _INTERLACED_WORKER_THREADS
// Worker-thread section of the inverse frame transform to rows of 16-bit
// luma and chroma.  Each worker polls the shared row semaphore and claims
// the next transform row via decoder->interlaced_worker.current_row (under
// the worker critical section when initialized), inverts the horizontal and
// temporal transforms for that row, and writes the corresponding output rows.
// The loop exits when the semaphore poll fails or the claimed row index is
// out of range (no more rows to process).
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
// NOTE(review): captured below but not used by this routine (the inverse
// row transforms called here take no quantization arguments).
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width/2;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
#if (1 && DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
#if 0
if (thread_index == 1)
{
// Skip over the buffer space used by the other thread
size_t buffer_usage = 2 * temporal_row_size;
buffer += buffer_usage;
buffer_size -= buffer_usage;
}
#else
// Divide the buffer space between the two threads
// NOTE(review): the comment says two threads but the space is divided by
// four — presumably this reserves headroom or supports up to four workers;
// confirm against the worker thread pool size.
buffer_size /= 4;
buffer += buffer_size * thread_index;
#endif
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
assert((2 * temporal_row_size) <= buffer_size);
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (1 && DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
// (channel 0 is luma at full width; the chroma channels are half width)
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
//output += field_pitch * (half_height - 1);
output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
output_pitch = NEG(output_pitch);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // middle threads
}
*/
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
// Loop until all of the rows have been processed
for (;;)
{
PIXEL16U *output_row_ptr;
// Wait for one row from each channel to invert the transform
// (zero timeout: this is a non-blocking poll, so the loop ends as soon
// as the semaphore count is exhausted)
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
// Claim the next unprocessed row under the worker lock (if initialized)
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// Position the output pointer and the band row pointers for this row
// (output_row_ptr advances in PIXEL16U units, so row * output_pitch
// elements equals row * field_pitch bytes — two output rows per row)
output_row_ptr = output;
output_row_ptr += row * output_pitch;
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch*row;
horizontal_lowhigh[channel] += pitch*row;
horizontal_highlow[channel] += pitch*row;
horizontal_highhigh[channel] += pitch*row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
assert(0 <= row && row < half_height);
if(decoder->frame.resolution == DECODED_RESOLUTION_FULL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
// (half-horizontal decodes bypass the inverse horizontal filter)
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
}
else
{
// No more rows to process
break;
}
}
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
// NOTE(review): dead code — both thread entry points below are compiled out
// by `#if 0` and reference TransformInverseFrameToRow16utop/bottom, which do
// not appear in this file.  Kept for reference; candidate for deletion.
#if 0
// Thread entry point that unpacks its parameter struct and runs the top-half
// inverse frame transform.
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
struct data
{
TRANSFORM *transform[3];
int frame_index;
int num_channels;
uint8_t *output;
int output_pitch;
FRAME_INFO *info;
SCRATCH *scratch;
int chroma_offset;
int precision;
} *dptr;
dptr = (struct data *)param;
TransformInverseFrameToRow16utop(dptr->transform, dptr->frame_index, dptr->num_channels,
(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
dptr->scratch, dptr->chroma_offset, dptr->precision);
return 0;
}
// Thread entry point that unpacks its parameter struct and runs the bottom-half
// inverse frame transform.
DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
struct data
{
TRANSFORM *transform[3];
int frame_index;
int num_channels;
uint8_t *output;
int output_pitch;
FRAME_INFO *info;
SCRATCH *scratch;
int chroma_offset;
int precision;
} *dptr;
dptr = (struct data *)param;
TransformInverseFrameToRow16ubottom(dptr->transform, dptr->frame_index, dptr->num_channels,
(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
dptr->scratch, dptr->chroma_offset, dptr->precision);
return 0;
}
#endif
extern void fast_srand( int seed );
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
// Invert the horizontal-temporal (frame) transform for one GOP frame and pack
// the result into the output buffer in the requested color format.  The
// scratch space holds one lowpass and one highpass temporal row per channel,
// followed by an intermediate YUV buffer used by the packing routines.
//
// FIX: the temporal buffer size was previously computed from the UNALIGNED
// temporal row size, but the per-channel temporal rows are laid out using the
// cache-line-ALIGNED row size.  Whenever frame_width * sizeof(PIXEL) was not a
// multiple of _CACHE_LINE_SIZE, yuv_buffer (placed at buffer +
// temporal_buffer_size) overlapped the tail of the temporal rows.  The size is
// now computed after the row size is aligned, matching the actual layout.
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
char *buffer, size_t buffer_size, int chroma_offset,
int precision)
#else
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Pointers to the rows in the temporal wavelet for each channel
PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
// Computed below, after the temporal row size is rounded up to a cache line
size_t temporal_buffer_size;
#if DEBUG
size_t yuv_row_size = frame_width * 2;
#endif
char *yuv_buffer;
size_t yuv_buffer_size;
int field_pitch = 2 * output_pitch;
int format = frame->format;
// RGB output is stored bottom-up, so the row order must be inverted
bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);
int output_width;
int channel;
int row;
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Compute the temporal buffer size from the ALIGNED row size so that the
// YUV buffer carved out below starts after the temporal rows (see FIX above)
temporal_buffer_size = 2 * num_channels * temporal_row_size;
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Check that the buffer is large enough
assert((2 * num_channels * temporal_row_size) <= buffer_size);
// Allocate buffers for a single row of lowpass and highpass temporal coefficients
// and initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Compute the 8-bit pitch in units of pixels
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Divide the buffer into temporal lowpass and highpass rows
temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
}
// Allocate buffer space for the intermediate YUV data
yuv_buffer = buffer + temporal_buffer_size;
yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif
if (inverted)
{
// Start at the bottom output row and negate the pitches to move upward
output += (frame_height - 1) * output_pitch;
output_pitch = (- output_pitch);
field_pitch = (- field_pitch);
}
// Process one row at a time from each channel
for (row = 0; row < half_height; row++)
{
// Intermediate line buffer for the buffered inverse horizontal transforms,
// placed two (aligned) rows past the start of the YUV region
PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
//int pitch8s = horizontal_pitch8s[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
(PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
temporal_highpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Advance to the next row in each horizontal band in this channel
horizontal_lowlow[channel] += pitch;
horizontal_lowhigh[channel] += pitch;
horizontal_highlow[channel] += pitch;
horizontal_highhigh[channel] += pitch;
}
// The output width is twice the width of the wavelet bands
output_width = 2 * horizontal_width[0];
// Adjust the frame width to fill to the end of each row
//frame_width = output_pitch / 2;
//#if BUILD_PROSPECT
if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
{
// Invert the temporal bands from all channels and pack as V210 output
InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
}
else
//#endif
{
// Invert the temporal bands from all channels and pack as 8-bit output
InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
yuv_buffer, yuv_buffer_size, format, frame->colorspace,
chroma_offset, precision, row);
}
// Advance to the next row in the packed output image
// (each iteration produces two output rows, hence the field pitch)
output += field_pitch;
}
}
// Convert a decoded image into the output buffer in the requested format.
// RGB24/RGB32 (non-inverted variants) are converted bottom-up so that the
// first image row is displayed at the bottom; unsupported formats assert and
// emit a blank (chroma-zero) frame.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	START(tk_convert);

	if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB24_INVERTED)
	{
		// The plain RGB24 format requires row inversion; the inverted variant does not
		bool flip_rows = (format == DECODED_FORMAT_RGB24);
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, flip_rows);
	}
	else if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB32_INVERTED)
	{
		// Same row-inversion rule as RGB24, but with a 32-bit output format
		bool flip_rows = (format == DECODED_FORMAT_RGB32);
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, flip_rows);
	}
	else if (format == DECODED_FORMAT_YUYV)
	{
		// YUV output is never inverted
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, false);
	}
	else if (format == DECODED_FORMAT_UYVY)
	{
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, false);
	}
	else
	{
		// Unsupported format (return a blank frame)
		size_t blank_size = image->height * output_pitch;
		assert(0);
		memset(output_buffer, COLOR_CHROMA_ZERO, blank_size);
	}

	STOP(tk_convert);
}
// Pack 10-bit lowpass luma/chroma planes into 8-bit 4:2:2 output, averaging
// horizontally adjacent samples (the sum is descaled by >>5: divide by two
// for the average and by 8 to drop from 10-bit to 8-bit range, with an extra
// lowpass scale factor).  Optionally writes the rows bottom-up.
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width, int output_height,
int output_pitch, bool inverted)
{
	IMAGE *luma_image = images[0];
	IMAGE *u_image = images[1];
	IMAGE *v_image = images[2];

	int width = luma_image->width;
	int height = output_height;

	// Current row in each input plane
	PIXEL *luma_row = luma_image->band[0];
	PIXEL *u_row = u_image->band[0];
	PIXEL *v_row = v_image->band[0];

	// Plane pitches in units of pixels
	int luma_pitch = luma_image->pitch/sizeof(PIXEL);
	int u_plane_pitch = u_image->pitch/sizeof(PIXEL);
	int v_plane_pitch = v_image->pitch/sizeof(PIXEL);

	uint8_t *outrow = output_buffer;
	int row;

	// The output pitch should be a positive number before inversion
	assert(output_pitch > 0);

	// Invert the image by starting at the bottom row and walking upward
	if (inverted) {
		outrow += (height - 1) * output_pitch;
		output_pitch = NEG(output_pitch);
	}

	for (row = 0; row < height; row++)
	{
		uint8_t *outptr = outrow;
		int column;

		// Pack four luma samples and two chroma pairs into four output bytes per step
		for (column = 0; column < width; column += 4)
		{
			int chroma_column = column >> 1;
			// NOTE(review): the bytes are written in Y,V,Y,U order, which reads
			// as YVYU rather than YUYV — presumably channels 1 and 2 carry
			// swapped chroma by convention; confirm against the channel layout.
			*(outptr++) = SATURATE_8U((luma_row[column] + luma_row[column + 1]) >> 5);
			*(outptr++) = SATURATE_8U((v_row[chroma_column] + v_row[chroma_column + 1]) >> 5);
			*(outptr++) = SATURATE_8U((luma_row[column + 2] + luma_row[column + 3]) >> 5);
			*(outptr++) = SATURATE_8U((u_row[chroma_column] + u_row[chroma_column + 1]) >> 5);
		}

		// Advance to the next rows in the input planes and the output image
		luma_row += luma_pitch;		// 3D Work
		u_row += u_plane_pitch;
		v_row += v_plane_pitch;
		outrow += output_pitch;
	}
}
// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
//
// Dispatches on info->format (the requested output pixel format) and on the
// encoded format of the compressed stream to select the matching lowpass
// conversion routine.  The chroma_offset parameter is accepted for interface
// compatibility but is not referenced by any of the conversion paths below.
//
// descale is the right shift needed to reduce the encoded precision to 8 bits.
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch,
							FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
	//IMAGE *image = frame->channel[0];
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int descale = precision - 8;

	// Get the color format from the decoded format
	int color_format = info->format & COLOR_FORMAT_MASK;

	// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
	assert(0);
	return;
#endif

	START(tk_convert);

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB24_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
				COLOR_FORMAT_RGB24, info->colorspace, inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
				COLOR_FORMAT_RGB24, info->colorspace, inverted, descale);
		}
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB32_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
				COLOR_FORMAT_RGB32, info->colorspace, inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
				COLOR_FORMAT_RGB32, info->colorspace, inverted, descale);
		}
		break;

	case DECODED_FORMAT_RG48:
		if(encode_format == ENCODED_FORMAT_BAYER)
		{
			ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
				output_pitch, 2, num_channels);
		}
		else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			// This branch is only entered for RGB 4:4:4 or RGBA 4:4:4:4 input,
			// so the former inner test for the same condition was always true
			// and the scale is unconditionally 2.
			int scale = 2;
			ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
				output_pitch, scale, num_channels);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
				output_height, output_pitch, info->colorspace, inverted, descale,
				info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_RG64:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
				descale, num_channels, info->format & 0xffff);
		}
		else
		{
			// RG64 output is only supported for RGB(A) 4:4:4 encoded sources
			assert(0);
		}
		break;

	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
				descale, num_channels, info->format & 0xffff);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
				output_height, output_pitch, info->colorspace, inverted, descale,
				info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		if (precision == CODEC_PRECISION_10BIT)
		{
			int lineskip = 1;		// 3D Work
			int pitch = output_pitch;

			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
			{
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
				{
					lineskip = 2;
					// NOTE(review): magic number 3 -- presumably BLEND_LINE_INTERLEAVED; confirm against the enum
					if(decoder->channel_blend_type == 3)
						pitch *= 2;
				}
			}

			if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || decoder->channel_blend_type == BLEND_FREEVIEW) && decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
			{
				SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height, pitch, inverted);
			}
			else
			{
				ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, color_format, inverted, lineskip);
			}
		}
		else
		{
			ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted);
		}
		break;

	//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		if (precision == CODEC_PRECISION_10BIT)
		{
			ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted);
		}
		else
		{
			// V210 output requires 10-bit encoded data
			assert(0);
		}
		break;
	//#endif

	case DECODED_FORMAT_YU64:
		// DAN04262004
		ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision);
		break;

	//#if BUILD_PROSPECT
	case DECODED_FORMAT_YR16:
		ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision);
		break;
	//#endif

	default:	// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert a strip of planar YUV rows into the packed output format requested
// by the caller.  The conversion routine is chosen from the decoded format.
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
	int output_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

	START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

	// Dispatch on the requested output pixel format
	if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB24_INVERTED)
	{
		// The non-inverted format places the first image row at the bottom
		bool flip = (format == DECODED_FORMAT_RGB24);
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
							  COLOR_FORMAT_RGB24, colorspace, flip);
	}
	else if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB32_INVERTED)
	{
		// The non-inverted format places the first image row at the bottom
		bool flip = (format == DECODED_FORMAT_RGB32);
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
							  COLOR_FORMAT_RGB32, colorspace, flip);
	}
	else if (format == DECODED_FORMAT_YUYV)
	{
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
	}
	else if (format == DECODED_FORMAT_UYVY)
	{
		// UYVY output is never inverted
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, false);
	}
	else
	{
		// Unsupported format (output a blank frame)
		assert(0);
	}

	STOP(tk_convert);
}
// Convert rows of 16-bit unsigned pixels into a dithered output buffer,
// dispatching on the requested output format.
//
// NOTE(review): this switch mixes DECODED_FORMAT_* and COLOR_FORMAT_* case
// labels; presumably the two constant namespaces share values for these
// entries -- confirm against the format enum definitions.
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
	bool inverted = false;
	int output_width = roi.width;

	START(tk_convert);

	// Determine the type of conversion
	switch(format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB24_INVERTED:
		//ConvertPlanarYUVToRGB
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
								   COLOR_FORMAT_RGB24, colorspace, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
								   COLOR_FORMAT_RGB32, colorspace, inverted);
		break;

	// Deep (10-bit and wider) RGB output formats share one conversion routine
	case COLOR_FORMAT_WP13:
	case COLOR_FORMAT_B64A:
	case COLOR_FORMAT_RG48:
	case COLOR_FORMAT_R210:
	case COLOR_FORMAT_DPX0:
	case COLOR_FORMAT_RG30:
	case COLOR_FORMAT_AR10:
	case COLOR_FORMAT_AB10:
		ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch, format, colorspace, NULL, NULL);
		break;

	case DECODED_FORMAT_YUYV:
		assert(0);	// These routines are not yet updated for ROW16u inputs
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		assert(0);	// These routines are not yet updated for ROW16u inputs
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, output_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, inverted);
		break;

	default:	// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert one row of packed YUYV to the specified color format
//
// input:     one row of packed YUYV samples
// output:    one row in the requested decoded format
// length:    width of the row in pixels (a packed 8-bit YUYV row is 2*length bytes)
// precision: bit depth of the input samples; only 8-bit rows are handled here
//
// The previous version set a local `inverted` flag for the RGB formats that
// was never used; it has been removed (row inversion is not performed here).
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
	// Size in bytes of one packed 8-bit YUYV row (two bytes per pixel)
	size_t row_size = 2 * length;

	START(tk_convert);

	// Determine the type of color conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB24_INVERTED:
		// Both orientations use the same row conversion
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
		break;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
		break;

	case DECODED_FORMAT_YUYV:
		if(precision == 8)
			memcpy(output, input, row_size);
		else
		{
			// Dithering higher precision input down to 8-bit is not implemented
			assert(0);
		}
		break;

	case DECODED_FORMAT_UYVY:
		if(precision == 8)
			ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
		else
		{
			// Dithering higher precision input down to 8-bit is not implemented
			assert(0);
		}
		break;

	//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		assert(0);	// Should not reach here: this routine only handles 8-bit rows
		//ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
		break;

	case DECODED_FORMAT_YU64:
		assert(0);	// Should not reach here: this routine only handles 8-bit rows
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;

	case DECODED_FORMAT_BYR3:
	case DECODED_FORMAT_BYR4:
		assert(0);	// Should not reach here: this routine only handles 8-bit rows
		break;
	//#endif

	default:	// Unsupported format (output a blank row)
		assert(0);
		memset(output, 0, row_size);
		break;
	}

	STOP(tk_convert);
}
#if _THREADED_DECODER
// Get (allocating or resizing as needed) the wavelet at the given index in
// the transform, serialized against the entropy worker threads.
//
// Returns the (re)allocated wavelet, or NULL if decoder or transform is NULL.
//
// Fix: the previous version read transform->wavelet[index] in the initializer,
// dereferencing transform BEFORE the NULL check below (potential NULL
// dereference).  The read now happens inside the guarded region.
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	IMAGE *wavelet = NULL;

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}

	return wavelet;
}
// Update the codec state with the information in a tag value pair
//
// Parses one (tag, value) pair from the bitstream and records it in the
// CODEC_STATE.  Some tags mark the start of encoded data and dispatch
// directly to the channel-header or subband decoders.  A negative tag marks
// the pair as optional; unknown optional tags are skipped using the chunk
// size carried in the tag/value pair, while an unknown required tag yields
// CODEC_ERROR_UNKNOWN_REQUIRED_TAG.
//
// Returns CODEC_ERROR_OKAY on success or a specific CODEC_ERROR_* code.
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	bool optional = false;
	int chunksize = 0;
	bool result;

	// Is this an optional tag?  (Optional tags are encoded as negative values)
	if (tag < 0) {
		tag = NEG(tag);
		optional = true;
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n",
			tag, value, optional);
	}
#endif

	switch (tag)
	{
	case CODEC_TAG_ZERO:		// Used internally
		assert(0);				// Should not occur in the bitstream
		error = CODEC_ERROR_INVALID_BITSTREAM;
		break;

	case CODEC_TAG_SAMPLE:		// Type of sample
		//assert(0);
		if (value == SAMPLE_TYPE_CHANNEL)
		{
			result = DecodeSampleChannelHeader(decoder, input);
			if (!result)
				error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_INDEX:		// Sample index table
		//assert(0);			// Need to figure out how to return the group index
		{
			int count = value;
			uint32_t *index = (uint32_t *)(&codec->channel_size[0]);
			DecodeGroupIndex(input, index, count);
			codec->num_channels = count;
		}
		break;

	case CODEC_TAG_SUBBAND:		// Has the decoder encountered a subband?
		{	// This tag is obsolete and not used in modern streams
			int subband = value;

			// Check that the subband number makes sense
			assert(0 <= subband && subband <= codec->max_subband);
			if (! (0 <= subband && subband <= codec->max_subband))
			{
				error = CODEC_ERROR_DECODING_SUBBAND;
				break;
			}

			// Decompress the subband
			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_BAND_HEADER:	//CODEC_TAG_BAND_DIVISOR: Band divisor; this is the last tag before the subband data, so decode the subband now
		codec->band.divisor = value;	// This tag value pair encodes the band divisor which is obsolete
		{
			// This tag value pair marks the beginning of the encoded coefficients
			// The subband number has already been decoded
			int subband = codec->band.subband;
			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_ENTRY:		// Entry in sample index
		assert(0);				// Need to figure out how to return the group index
		break;

	case CODEC_TAG_MARKER:		// Bitstream marker
		{
			int marker = value;
			uint8_t *current_position;

			// Save the current bitstream position
			current_position = GetBitstreamPosition(input);
			current_position -= 4;	// Step back to before the GetSegment i.e. the TAG

			if (IsLowPassHeaderMarker(marker))
			{
				// Save the bitstream position for the start of the channel
				codec->channel_position = current_position;
			}
			else if (IsLowPassBandMarker(marker))
			{
				int subband = 0;
				result = DecodeSampleSubband(decoder, input, subband);
				if (!result)
					error = CODEC_ERROR_DECODING_SUBBAND;
				else
					error = CODEC_ERROR_OKAY;
			}
		}
		break;

	case CODEC_TAG_VERSION_MAJOR:		// Version
		assert(0);
		break;

	case CODEC_TAG_VERSION_MINOR:		// Minor version number
		assert(0);
		break;

	case CODEC_TAG_VERSION_REVISION:	// Revision number
		assert(0);
		break;

	case CODEC_TAG_VERSION_EDIT:		// Edit number
		assert(0);
		break;

	case CODEC_TAG_SEQUENCE_FLAGS:		// Video sequence flags
		assert(0);
		break;

	case CODEC_TAG_TRANSFORM_TYPE:		// Type of transform
		assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
		if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
		{
			int i;
			codec->transform_type = value;
			// Refresh the prescale tables now that the transform type is known
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					GetTransformPrescale(transform, codec->transform_type, codec->precision);
				}
			}
		}
		else
			error = CODEC_ERROR_TRANSFORM_TYPE;
		break;

	case CODEC_TAG_NUM_FRAMES:			// Number of frames in the group
		assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
		if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
			codec->num_frames = value;
		else
			error = CODEC_ERROR_NUM_FRAMES;
		break;

	case CODEC_TAG_NUM_CHANNELS:		// Number of channels in the transform
		assert(value <= CODEC_MAX_CHANNELS);
		if (value <= CODEC_MAX_CHANNELS)
			codec->num_channels = value;
		else
			error = CODEC_ERROR_NUM_CHANNELS;
		break;

	case CODEC_TAG_NUM_WAVELETS:		// Number of wavelets in the transform
		assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
		if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
			codec->num_wavelets = value;
		else
			error = CODEC_ERROR_NUM_WAVELETS;
		break;

	case CODEC_TAG_NUM_SUBBANDS:		// Number of encoded subbands
		assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
		if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
			codec->num_subbands = value;
		else
			error = CODEC_ERROR_NUM_SUBBANDS;
		break;

	case CODEC_TAG_NUM_SPATIAL:			// Number of spatial levels
		assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
		if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
			codec->num_spatial = value;
		else
			error = CODEC_ERROR_NUM_SPATIAL;
		break;

	case CODEC_TAG_FIRST_WAVELET:		// Type of the first wavelet
		assert(value == TRANSFORM_FIRST_WAVELET);
		if (value == TRANSFORM_FIRST_WAVELET)
			codec->first_wavelet = value;
		else
			error = CODEC_ERROR_FIRST_WAVELET;
		break;

	case CODEC_TAG_CHANNEL_SIZE:		// Number of bytes in each channel
		assert(0);
		break;

	case CODEC_TAG_GROUP_TRAILER:		// Group trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_FRAME_TYPE:			// Type of frame marks the frame start
		codec->frame.type = value;
		break;

	case CODEC_TAG_FRAME_WIDTH:			// Width of the frame
		codec->frame.width = value;
		break;

	case CODEC_TAG_FRAME_HEIGHT:		// Height of the frame
		codec->frame.height = value;

		//DAN20080729 -- Initialize the default colorspace based on clip resolution
		if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
		{
			int internalheight = value;
			int internalwidth = codec->frame.width;
			// Bayer frames are encoded at half the display dimensions
			if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
			{
				internalwidth *= 2;
				internalheight *= 2;
			}

			// Larger than SD defaults to 709, otherwise 601
			if(internalheight > 576 || internalwidth > 720)
				decoder->frame.colorspace |= COLOR_SPACE_CG_709;
			else
				decoder->frame.colorspace |= COLOR_SPACE_CG_601;
		}
		//if(decoder->frame.colorspace_filedefault)
		//	decoder->frame.colorspace = decoder->frame.colorspace_filedefault;
		if(decoder->frame.colorspace_override)
			decoder->frame.colorspace = decoder->frame.colorspace_override;
		break;

	case CODEC_TAG_ENCODED_COLORSPACE:	//DAN20080729
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
			value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
				//there was a bug in 3.9.4 that had bayer flagged as 601.
		if(decoder->frame.colorspace_override)
			decoder->frame.colorspace = decoder->frame.colorspace_override;
		else
		{
			if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
				//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
			}
			else
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
			}
		}
		decoder->frame.colorspace_filedefault = value;
		break;

	case CODEC_TAG_FRAME_FORMAT:		// Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
		assert(0);
		break;

	case CODEC_TAG_INPUT_FORMAT:		// Format of the original pixels
		codec->input_format = value;
		// Set the encoded format if it has not already been set
		// error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
		break;

	case CODEC_TAG_ENCODED_FORMAT:		// Internal format of the encoded data
	case CODEC_TAG_OLD_ENCODED_FORMAT:
		codec->encoded_format = value;
		// A three-channel RGBA 4:4:4:4 stream is really RGB 4:4:4
		if(codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
			codec->encoded_format = ENCODED_FORMAT_RGB_444;
		break;

	case CODEC_TAG_FRAME_INDEX:			// Position of frame within the group
		codec->frame.group_index = value;
		break;

	case CODEC_TAG_FRAME_TRAILER:		// Frame trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_LOWPASS_SUBBAND:		// Subband number of the lowpass band
		codec->lowpass.subband = value;
		error = SetDefaultEncodedFormat(codec);
		break;

	case CODEC_TAG_NUM_LEVELS:			// Number of wavelet levels
		codec->lowpass.level = value;
		break;

	case CODEC_TAG_LOWPASS_WIDTH:		// Width of the lowpass band
		codec->lowpass.width = value;
		break;

	case CODEC_TAG_LOWPASS_HEIGHT:		// Height of the lowpass band
		codec->lowpass.height = value;
		break;

	case CODEC_TAG_MARGIN_TOP:			// Margins that define the encoded subset
		codec->lowpass.margin.top = value;
		break;

	case CODEC_TAG_MARGIN_BOTTOM:
		codec->lowpass.margin.bottom = value;
		break;

	case CODEC_TAG_MARGIN_LEFT:
		codec->lowpass.margin.left = value;
		break;

	case CODEC_TAG_MARGIN_RIGHT:
		codec->lowpass.margin.right = value;
		break;

	case CODEC_TAG_PIXEL_OFFSET:		// Quantization parameters
		codec->lowpass.pixel_offset = value;
		break;

	case CODEC_TAG_QUANTIZATION:		// Quantization divisor used during encoding
		codec->lowpass.quantization = value;
		break;

	case CODEC_TAG_PIXEL_DEPTH:			// Number of bits per pixel
		codec->lowpass.bits_per_pixel = value;
		break;

	case CODEC_TAG_LOWPASS_TRAILER:		// Lowpass trailer
		assert(0);
		break;

	case CODEC_TAG_WAVELET_TYPE:		// Type of wavelet
		codec->highpass.wavelet_type = value;
		break;

	case CODEC_TAG_WAVELET_NUMBER:		// Number of the wavelet in the transform
		codec->highpass.wavelet_number = value;
		break;

	case CODEC_TAG_WAVELET_LEVEL:		// Level of the wavelet in the transform
		codec->highpass.wavelet_level = value;
		break;

	case CODEC_TAG_NUM_BANDS:			// Number of wavelet bands
		codec->highpass.num_bands = value;
		break;

	case CODEC_TAG_HIGHPASS_WIDTH:		// Width of each highpass band
		codec->highpass.width = value;
		break;

	case CODEC_TAG_HIGHPASS_HEIGHT:		// Height of each highpass band
		codec->highpass.height = value;
		break;

	case CODEC_TAG_LOWPASS_BORDER:		// Dimensions of lowpass border (obsolete)
		codec->highpass.lowpass_border = value;
		break;

	case CODEC_TAG_HIGHPASS_BORDER:		// Dimensions of highpass border (obsolete)
		codec->highpass.highpass_border = value;
		break;

	case CODEC_TAG_LOWPASS_SCALE:		// Scale factor for lowpass band
		codec->highpass.lowpass_scale = value;
		break;

	case CODEC_TAG_LOWPASS_DIVISOR:		// Divisor for the lowpass band
		codec->highpass.lowpass_divisor = value;
		break;

	case CODEC_TAG_HIGHPASS_TRAILER:	// Highpass trailer
		assert(0);
		break;

	case CODEC_TAG_BAND_NUMBER:			// Identifying number of a wavelet band
		codec->band.number = value;
		break;

	case CODEC_TAG_BAND_WIDTH:			// Band data width
		codec->band.width = value;
		break;

	case CODEC_TAG_BAND_HEIGHT:			// Band data height
		codec->band.height = value;
		break;

	case CODEC_TAG_BAND_SUBBAND:		// Subband number of this wavelet band
		codec->band.subband = value;
		//assert(value != 255);
		break;

	case CODEC_TAG_BAND_ENCODING:		// Encoding method for this band
		codec->band.encoding = value;
		break;

	case CODEC_TAG_BAND_QUANTIZATION:	// Quantization applied to band
		codec->band.quantization = value;
		break;

	case CODEC_TAG_BAND_SCALE:			// Band scale factor
		codec->band.scale = value;
		break;

	case CODEC_TAG_BAND_TRAILER:		// Band trailer
		assert(0);
		break;

	case CODEC_TAG_NUM_ZEROVALUES:		// Number of zero values
		assert(0);
		break;

	case CODEC_TAG_NUM_ZEROTREES:		// Number of zerotrees
		assert(0);
		break;

	case CODEC_TAG_NUM_POSITIVES:		// Number of positive values
		assert(0);
		break;

	case CODEC_TAG_NUM_NEGATIVES:		// Number of negative values
		assert(0);
		break;

	case CODEC_TAG_NUM_ZERONODES:		// Number of zerotree nodes
		assert(0);
		break;

	case CODEC_TAG_CHANNEL:				// Channel number
		assert(0);
		break;

	case CODEC_TAG_INTERLACED_FLAGS:	// Interlaced structure of the video stream
		//assert(0);
		break;

	case CODEC_TAG_PROTECTION_FLAGS:	// Copy protection bits
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_X:	// Numerator of the picture aspect ratio
		codec->picture_aspect_x = value;
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_Y:	// Denominator of the picture aspect ratio
		codec->picture_aspect_y = value;
		//assert(0);
		break;

	case CODEC_TAG_SAMPLE_FLAGS:		// Flag bits that control sample decoding
		// Progressive versus interlaced decoding is specified by the sample flags
		error = UpdateCodecFlags(codec, value);
		break;

	case CODEC_TAG_FRAME_NUMBER:		// Sequence number of the frame in the bitstream
		codec->frame_number = value;
		break;

	// This TAG is now supported as part of the universal decoder.
	// Only Prospect HD builds can decode 10bit.
	case CODEC_TAG_PRECISION:			// Number of bits in the video source
		codec->precision = value;
		{
			int i;
			// Refresh the prescale tables now that the precision is known
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					GetTransformPrescale(transform, codec->transform_type, codec->precision);
				}
			}
		}
		break;

	case CODEC_TAG_PRESCALE_TABLE:
		{
			int i;
			int prescale[TRANSFORM_MAX_WAVELETS] = {0};

			// Each wavelet's prescale shift is packed in two bits, highest wavelet first
			for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
				prescale[i] = value >> (14-i*2) & 0x3;

			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					memcpy(transform->prescale, prescale, sizeof(prescale));
				}
			}
		}
		break;

	case CODEC_TAG_VERSION:				// Version number of the encoder used in each GOP.
		codec->version[0] = (value>>12) & 0xf;
		codec->version[1] = (value>>8) & 0xf;
		codec->version[2] = value & 0xff;
		break;

	case CODEC_TAG_QUALITY_L:			// Low 16 bits of the encode quality
		codec->encode_quality &= 0xffff0000;
		codec->encode_quality |= value;
		break;

	case CODEC_TAG_QUALITY_H:			// High 16 bits of the encode quality
		codec->encode_quality &= 0xffff;
		codec->encode_quality |= value<<16;
		break;

	case CODEC_TAG_BAND_CODING_FLAGS:
		codec->active_codebook = value & 0xf; // 0-15 valid code books
		codec->difference_coding = (value>>4) & 1;
		break;

	// Peak table processing
	case CODEC_TAG_PEAK_TABLE_OFFSET_L:
		codec->peak_table.offset &= ~0xffff;
		codec->peak_table.offset |= (value & 0xffff);
		codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_TABLE_OFFSET_H:
		codec->peak_table.offset &= 0xffff;
		codec->peak_table.offset |= (value & 0xffff)<<16;
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_LEVEL:
		codec->peak_table.level = value;
		codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
		break;

	case CODEC_TAG_PEAK_TABLE:
		//this is the chunk header, so we have peak data
		codec->peak_table.level = 0; // reset for the next subband

		//Just skip as the data was read ahead
		chunksize = value;
		chunksize &= 0xffff;
		input->lpCurrentWord += chunksize*4;
		input->nWordsUsed -= chunksize*4;
		break;

#if (1 && DEBUG)
	case CODEC_TAG_SAMPLE_END:			// Marks the end of the sample (for debugging only)
		assert(0);
		break;
#endif

	default:							// Unknown tag
		if(tag & 0x4000)
		{
			if(tag & 0x2000)			// i.e. 0x6xxx = 24bit size.
			{
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);
			}
			else						// 16bit size
			{
				chunksize = value;
				chunksize &= 0xffff;
			}
		}
		else if(tag & 0x2000)			//24bit LONGs chunk size
		{
			optional = true;			// Fixes a weird scenario where the size fields in SizeTagPop() have not
										// updated the size and turned the tag to optional. TODO : WHY
			chunksize = 0;				// do not skip

			// chunksize = value + ((tag & 0xff)<<16);
			// do not skip an unknown but optional chunk
			// These are only used to size subbands, but the data within should not be skipped
			// unless ...
			if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
			{
				optional = true;
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);

				decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
				decoder->uncompressed_size = chunksize*4;
				decoder->sample_uncompressed = 1;
			}
		}

		assert(optional);
		if(!optional)
		{
			error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
		}
		else if(chunksize > 0)			// skip this optional chunk
		{
			input->lpCurrentWord += chunksize*4;
			input->nWordsUsed -= chunksize*4;
		}
		break;
	}

	return error;
}
// Mark one band of a wavelet as decoded (valid) and started, serialized
// against the entropy worker threads.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder == NULL || wavelet == NULL) {
		return;
	}

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

#if _THREADED_DECODER
	// Serialize flag updates with the entropy workers (if any are running)
	if(decoder->entropy_worker_new.pool.thread_count)
		Lock(&decoder->entropy_worker_new.lock);
#endif

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
			wavelet->band_valid_flags, BAND_VALID_MASK(band));
	}
#endif

	// Record that this band has been both started and fully decoded
	uint32_t mask = BAND_VALID_MASK(band);
	wavelet->band_valid_flags |= mask;
	wavelet->band_started_flags |= mask;

#if _THREADED_DECODER
	if(decoder->entropy_worker_new.pool.thread_count)
		Unlock(&decoder->entropy_worker_new.lock);
#endif
}
// Mark one band of a wavelet as started, serialized against the entropy
// worker threads.
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder == NULL || wavelet == NULL) {
		return;
	}

#if _DELAYED_THREAD_START==0
	// Guard the flag update when the entropy workers are running
	if(decoder->entropy_worker_new.pool.thread_count)
		Lock(&decoder->entropy_worker_new.lock);
#endif

	// Record that decoding of this band has begun
	wavelet->band_started_flags |= BAND_VALID_MASK(band);

#if _DELAYED_THREAD_START==0
	if(decoder->entropy_worker_new.pool.thread_count)
		Unlock(&decoder->entropy_worker_new.lock);
#endif
}
// Have all the bands of this wavelet that are NOT produced by the threaded
// transform queue been decoded from the bitstream?
//
// Returns true when every band that must come from the entropy decoder is
// marked valid; bands computed by earlier transforms in the queue (the
// lowpass band, for wavelets in the middle of a chain) are excluded.
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
	// A wavelet that has not been allocated yet cannot have valid bands
	if (wavelet == NULL) {
		return false;
	}

	// Bands that will be produced later by the threaded transform
	uint32_t deferred_band_mask;

	switch (transform_type)
	{
	case TRANSFORM_TYPE_FIELDPLUS:
		// The temporal wavelet is fully computed by earlier transforms
		if (index == 2)
		{
			assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
			assert(wavelet->num_bands == 2);
			return true;
		}
		// Wavelets at the end of a transform chain (indexes 3 and 5) need
		// every band decoded; otherwise the lowpass band arrives from an
		// earlier transform in the queue
		deferred_band_mask = (index == 3 || index == 5) ? 0 : BAND_VALID_MASK(0);
		break;

	case TRANSFORM_TYPE_SPATIAL:
		// The wavelet at the top of the pyramid (index 2) needs every band
		// decoded; otherwise the lowpass band arrives from an earlier
		// transform in the queue
		deferred_band_mask = (index == 2) ? 0 : BAND_VALID_MASK(0);
		break;

	default:
		// Unknown type of transform; assume the bands are not valid
		assert(0);
		return false;
	}

	// All bands in this wavelet, minus those the transform thread will supply
	uint32_t required_band_mask = ((1 << wavelet->num_bands) - 1) & ~deferred_band_mask;

	// Every required band must already be marked valid
	return (wavelet->band_valid_flags & required_band_mask) == required_band_mask;
}
// Queue an inverse wavelet transform request for the worker thread(s)
//
// Copies the transform parameters for the given channel and wavelet index
// into the next free slot of the decoder's transform queue, under the
// entropy worker lock.
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	TRANSFORM *transform = decoder->transform[channel];
	//IMAGE *wavelet = transform->wavelet[index];
	int precision = codec->precision;

	// The transform data structure must exist
	assert(transform != NULL);

	// The transform thread variables should have been created
	{
		int free_entry;

#if _DELAYED_THREAD_START==0
		// Lock access to the transform queue
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Copy the transform parameters into the next queue entry
		free_entry = decoder->transform_queue.free_entry;
		assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
		if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
		{
			assert(transform != NULL);
			assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
			assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);

			// Note: The wavelet may not exist when the transform is queued
			decoder->transform_queue.queue[free_entry].transform = transform;
			decoder->transform_queue.queue[free_entry].channel = channel;
			decoder->transform_queue.queue[free_entry].index = index;
			decoder->transform_queue.queue[free_entry].precision = precision;
			decoder->transform_queue.queue[free_entry].done = 0;

			// Update the transform request queue
			decoder->transform_queue.free_entry++;
			decoder->transform_queue.num_entries++;

#if (1 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
			}
#endif
		}
		// NOTE(review): when the queue is full the request is silently dropped
		// in release builds (only the assert above fires in debug builds)

#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}
#if _THREADED_DECODER
// Block until the transform worker pool has drained its queue, then
// reset the transform request queue to an empty state.
void WaitForTransformThread(DECODER *decoder)
{
    if (decoder->entropy_worker_new.pool.thread_count == 0) {
        // No worker threads were created, so there is nothing to wait for
        return;
    }

#if _DELAYED_THREAD_START
    // Workers have not been started yet; kick them off now
    ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif

    // Wait for every queued transform request to be processed
    ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

    // Empty the queue for the next batch of requests
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
}
#endif
#endif
#if _INTERLACED_WORKER_THREADS
// Hand an inverse frame transform (YUV output) to the interlaced worker
// threads and block until the frame is complete.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
                                        uint8_t *output, int pitch, FRAME_INFO *info,
                                        int chroma_offset, int precision)
{
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    int32_t previous_count;
    int32_t thread;

    // There are half as many input rows as output rows
    int row_count = (((info->height + 7) / 8) * 8) / 2;

    // Describe the job in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Charge the row semaphore with one count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up both worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Hand an inverse frame transform (16-bit row output) to the interlaced
// worker threads and block until the frame is complete.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
                                           PIXEL16U *output, int pitch, FRAME_INFO *info,
                                           int chroma_offset, int precision)
{
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    int32_t previous_count;
    int32_t thread;

    // There are half as many input rows as output rows
    int row_count = (((info->height + 7) / 8) * 8) / 2;

    // Describe the job in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = (uint8_t *)output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Charge the row semaphore with one count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up both worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Worker thread procedure for the interlaced inverse transform.
//
// Each worker claims a unique thread index, then loops waiting on either its
// start event (process the job posted in the shared mailbox) or the shared
// stop event (terminate).  Rows of work are claimed via the row semaphore
// charged by the dispatching routine.  Returns 0 on normal shutdown, 1 if
// the synchronization objects were not created.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
    DECODER *decoder = (DECODER *)lpParam;
    FILE *logfile = decoder->logfile;
    struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
    int thread_index;
    HANDLE hObjects[2];
    DWORD dwReturnValue;

    // Pin this thread to the requested processor set (if any)
    if(decoder->thread_cntrl.affinity)
    {
        HANDLE hCurrentThread = GetCurrentThread();
        SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
    }

    // Set the handler for system exceptions
#ifdef _WINDOWS
    SetDefaultExceptionHandler();
#endif

    // Determine the index of this worker thread
    // (the increment is done under the lock so each worker gets a unique index)
    if(decoder->interlaced_worker.lock_init)
    {
        EnterCriticalSection(&decoder->interlaced_worker.lock);
    }
    thread_index = decoder->interlaced_worker.thread_count++;
    if(decoder->interlaced_worker.lock_init)
        LeaveCriticalSection(&decoder->interlaced_worker.lock);

    // The transform worker variables should have been created
    assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.row_semaphore != NULL);
    assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.stop_event != NULL);
    if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
        decoder->interlaced_worker.row_semaphore != NULL &&
        decoder->interlaced_worker.done_event[thread_index] != NULL &&
        decoder->interlaced_worker.stop_event != NULL)) {
        return 1;
    }

    // Wait on this thread's start event or the shared stop event
    hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
    hObjects[1] = decoder->interlaced_worker.stop_event;

    for (;;)
    {
        // Wait for the signal to begin processing a transform
        dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);

        // Received a signal to begin inverse transform processing?
        if (dwReturnValue == WAIT_OBJECT_0)
        {
            int type;               // Type of inverse transform to perform
            int frame_index;        // Index of output frame to produce
            int num_channels;       // Number of channels in the transform array
            uint8_t *output;        // Output frame buffer
            int pitch;              // Output frame pitch
            FRAME_INFO info;        // Format of the output frame
            int chroma_offset;      // Offset for the output chroma
            int precision;          // Source pixel bit depth

            // Lock access to the transform data
            if(decoder->interlaced_worker.lock_init) {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }

            // Get the processing parameters
            // (copied locally so the mailbox can be reused by the dispatcher)
            type = data->type;
            frame_index = data->frame;
            num_channels = data->num_channels;
            output = data->output;
            pitch = data->pitch;
            memcpy(&info, &data->info, sizeof(FRAME_INFO));
            chroma_offset = data->chroma_offset;
            precision = data->precision;

            // Unlock access to the transform data
            if(decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            // Select the type of inverse transform to perform
            switch (type)
            {
            case THREAD_TRANSFORM_FRAME_YUV:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
                                                  output, pitch, &info, chroma_offset, precision);
                break;

            case THREAD_TRANSFORM_FRAME_ROW16U:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
                                                     (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
                break;

            default:
                // Unknown job type in the mailbox
                assert(0);
                break;
            }

            // Signal that this thread is done
            SetEvent(decoder->interlaced_worker.done_event[thread_index]);
        }
        else
        {
            // Should have a condition that causes the thread to terminate
            assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
            break;
        }
    }

    return 0;
}
#endif
// Return the dimensions of the frame that will be produced when decoding
// at the specified resolution.
//
// The dimensions are derived from the wavelet that supplies the data for
// that resolution, scaled by the decoding scale factor.  Either output
// pointer may be NULL if that dimension is not needed.  If the required
// wavelet is unavailable (or the resolution is unknown), zero dimensions
// are reported instead of dereferencing a null pointer — the assert below
// compiles out in release builds, so the old code would crash there.
void GetDecodedFrameDimensions(TRANSFORM **transform_array,
                               int num_channels,
                               int frame_index,
                               int resolution,
                               int *decoded_width_out,
                               int *decoded_height_out)
{
    IMAGE *wavelet = NULL;
    int decoded_scale = 0;
    int decoded_width = 0;
    int decoded_height = 0;

    // Select the wavelet and scale factor for the decoding resolution
    switch(resolution)
    {
    case DECODED_RESOLUTION_FULL_DEBAYER:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_FULL:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
#if DEBUG
        assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_QUARTER:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[3];
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[5];
        // Is this an intra frame?
        if (wavelet == NULL) {
            wavelet = transform_array[0]->wavelet[2];
        }
        break;

    default:
        // Unknown decoding resolution; wavelet stays NULL and zero
        // dimensions are reported below
        assert(0);
        break;
    }

    // Compute the decoded frame dimensions
    assert(wavelet != NULL);
    if (wavelet != NULL)
    {
        decoded_width = decoded_scale * wavelet->width;
        decoded_height = decoded_scale * wavelet->height;
    }

    if (decoded_width_out) {
        *decoded_width_out = decoded_width;
    }
    if (decoded_height_out) {
        *decoded_height_out = decoded_height;
    }
}
// Reconstruct Bayer format to the requested output format
// Reconstruct uncompressed Bayer data to the requested output format.
//
// The uncompressed chunk held by the decoder is either repacked directly
// into a Bayer output format (BYR2/BYR3/BYR4), or unpacked into a scratch
// buffer and demosaiced to the output format by the worker thread pool.
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// formats this routine cannot produce, or CODEC_ERROR_MEMORY_ALLOC.
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;

    // Assume the output format is unsupported until proven otherwise
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    switch (format)
    {
    // Formats that can be produced by the demosaic path below
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RG48:   //DAN20090120 added not sure why they weren't here.
    case DECODED_FORMAT_RG64:   //DAN20101207 added not sure why they weren't here.
    case DECODED_FORMAT_WP13:   //DAN20090120 ""
    case DECODED_FORMAT_W13A:   //DAN20101207 ""
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
    case DECODED_FORMAT_YUYV:   //?
    case DECODED_FORMAT_UYVY:   //?
    case DECODED_FORMAT_R408:
    case DECODED_FORMAT_V408:
        error = CODEC_ERROR_OKAY;
        break;

    // Bayer output formats are converted directly from the packed chunk
    case DECODED_FORMAT_BYR2:
    case DECODED_FORMAT_BYR4:
        {
            unsigned short *curve = NULL;
            // Apply the linear restore curve only for BYR4 output with no encode curve preset
            if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
            {
                curve = decoder->BYR4LinearRestore;
            }
            ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
        }
        // The chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;

    case DECODED_FORMAT_BYR3:
        ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    if(error)
        return error;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        frame_size = width * height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL)) {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        if(decoder->RGBFilterBuffer16 == NULL)
        {
            // Three planes of output, or four when the output format carries alpha
            int size = frame_size*3;
            if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size*4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL)) {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the actual allocation size.  The old code always recorded
            // frame_size*3, under-reporting the four-plane alpha allocation.
            decoder->RGBFilterBufferSize = size;
        }
    }

    // Using the RGBFilterBuffer16 as scratch space
    ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
    decoder->uncompressed_chunk = 0;
    decoder->uncompressed_size = 0;

#if _THREADED
    //DemosaicRAW
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int inverted = false;
        uint8_t *output = output_buffer;
        int pitch = output_pitch;

#if _DELAY_THREAD_START
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                             decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                             WorkerThreadProc,
                             decoder);
        }
#endif
        // RGB24/RGB32 are bottom-up formats; process them inverted
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            inverted = true;
        }

        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0) {
            int height = info->height;
            if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
                height *= 2;
            output += (height - 1) * pitch;     // Start at the bottom row
            pitch = NEG(pitch);                 // Negate the pitch to go up
        }

        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
        mailbox->jobType = JOB_TYPE_OUTPUT;

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

    return error;
}
// Reconstruct uncompressed v210 YUV format to the requested output format
// Reconstruct uncompressed v210 YUV source data to the requested output format.
//
// Two fast paths copy or repack the uncompressed chunk directly when no
// active-metadata processing is required; otherwise the chunk is expanded
// one scanline at a time (threaded build: via the worker pool).
// Returns CODEC_ERROR_OKAY on success or CODEC_ERROR_MEMORY_ALLOC.
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;
    int resolution = info->resolution;

    error = CODEC_ERROR_UNSUPPORTED_FORMAT;

    // Fast path: v210 output at full resolution is a row-by-row copy
    if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
    {
        int smallest_Stride = output_pitch;
        int unc_Stride = decoder->uncompressed_size / height;
        if(unc_Stride < smallest_Stride)
            smallest_Stride = unc_Stride;

        if(unc_Stride == output_pitch)
            memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
        else
        {
            int line;
            uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
            uint8_t *dst = (uint8_t *)output_buffer;
            for(line=0; line<height; line++)
            {
                memcpy(dst, src, smallest_Stride);
                src += unc_Stride;
                dst += output_pitch;
            }
        }

        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    // Fast path: unpack v210 directly to 8-bit YUYV/UYVY at full resolution
    if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
    {
        int smallest_Stride = output_pitch;
        int unc_Stride = decoder->uncompressed_size / height;
        if(unc_Stride < smallest_Stride)
            smallest_Stride = unc_Stride;

        {
            int line;
            uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
            uint8_t *dst = (uint8_t *)output_buffer;
            for(line=0; line<height; line++)
            {
                uint32_t *input_ptr = (uint32_t *)src;
                int pos = 0;
                int column=0,length = width;
                length -= length % 6; //DAN03252004 -- fix a memory overflow.

                // Each group of four v210 words packs six pixels
                for (column=0; column < length; column += 6)
                {
                    uint32_t yuv;
                    int y;
                    int u;
                    int v;

                    // Read the first word
                    yuv = *(input_ptr++);
                    u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;

                    // Expand the pixels to sixteen bits
                    u <<= 6;
                    y <<= 6;
                    v <<= 6;

                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(u)>>8;

                    // Read the second word
                    yuv = *(input_ptr++);
                    y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(v)>>8;

                    u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    u <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(u)>>8;

                    // Read the third word
                    yuv = *(input_ptr++);
                    v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    v <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(v)>>8;

                    u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    u <<= 6;

                    // Read the fourth word
                    yuv = *(input_ptr++);
                    y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(u)>>8;

                    v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    v <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y)>>8;
                    dst[pos++] = SATURATE_16U(v)>>8;
                }

                // UYVY is YUYV with the luma and chroma bytes swapped
                if(format == DECODED_FORMAT_UYVY)
                {
                    for (column=0; column < pos; column += 2)
                    {
                        int t = dst[column];
                        dst[column] = dst[column+1];
                        dst[column+1] = t;
                    }
                }

                src += unc_Stride;
                dst += output_pitch;
            }
        }

        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    {
        // Expand YUV at the target resolution, and use the ActiveMetadata engine.
        // Need to allocate a scratch buffer for decoding the frame?
        if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
        {
            const size_t alignment = 16;
#if _ALLOCATOR
            ALLOCATOR *allocator = decoder->allocator;
#endif
            int orig_width = width;
            if(resolution == DECODED_RESOLUTION_HALF)
                orig_width *= 2;
            if(resolution == DECODED_RESOLUTION_QUARTER)
                orig_width *= 4;

            // Release any previous (undersized) scratch buffer
            if(decoder->RawBayer16)
            {
#if _ALLOCATOR
                FreeAligned(allocator, decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#else
                MEMORY_ALIGNED_FREE(decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#endif
            }

#if _ALLOCATOR
            decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
            decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
            assert(decoder->RawBayer16 != NULL);
            if (! (decoder->RawBayer16 != NULL)) {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            decoder->RawBayerSize = orig_width * 64;
        }
    }

    // unpack source original YUV into YU64?
    if(decoder->RawBayer16)
    {
#if _THREADED
        {
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
            if(decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            // Post a message to the mailbox
            mailbox->output = output_buffer;
            mailbox->pitch = output_pitch;
            memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
            mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
#else
        {
            int orig_width = width;
            int orig_height = height;
            int row,lines = 1;
            int start,end;
            // Fix: these row pointers were referenced below without being
            // declared, which broke the non-threaded build
            uint8_t *src = NULL;
            uint8_t *dst = (uint8_t *)output_buffer;

            if(resolution == DECODED_RESOLUTION_HALF)
            {
                orig_width *= 2;
                orig_height *= 2;
                lines = 2;
            }
            if(resolution == DECODED_RESOLUTION_QUARTER)
            {
                orig_width *= 4;
                orig_height *= 4;
                lines = 4;
            }

            start = 0;
            end = height;
            if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
            {
                start = height-1;
                end = -1;
            }

            for (row = start; row != end; end > start ? row++ : row--)
            {
                int whitebitdepth = 16;
                int flags = 0;
                uint8_t *planar_output[3];
                int planar_pitch[3];
                ROI roi;
                PIXEL16U *y_row_ptr;
                PIXEL16U *u_row_ptr;
                PIXEL16U *v_row_ptr;
                PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
                PIXEL16U *scanline2 = scanline + orig_width * 8;
                unsigned short *sptr;
                int i,unc_Stride = decoder->uncompressed_size / orig_height;

                y_row_ptr = (PIXEL16U *)scanline;
                u_row_ptr = y_row_ptr + orig_width;
                v_row_ptr = u_row_ptr + orig_width/2;

                for(i=0; i<lines; i++)
                {
                    src = (uint8_t *)decoder->uncompressed_chunk;
                    src += row * unc_Stride;

                    // Repack the row of 10-bit pixels into 16-bit pixels
                    ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);

                    // Advance to the next rows in the input and output images
                    y_row_ptr += orig_width*2;
                    u_row_ptr = y_row_ptr + orig_width;
                    v_row_ptr = u_row_ptr + orig_width/2;
                }

                y_row_ptr = (PIXEL16U *)scanline;
                u_row_ptr = y_row_ptr + width;
                v_row_ptr = u_row_ptr + width/2;

                // Average down to the target width for the reduced resolutions
                if(lines == 2)
                {
                    for(i=0; i<width*2;i++)
                        y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
                }
                else if(lines == 4)
                {
                    for(i=0; i<width*2;i++)
                        y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
                }

                roi.width = width;
                roi.height = 1;
                planar_output[0] = (uint8_t *)y_row_ptr;
                planar_output[1] = (uint8_t *)v_row_ptr;
                planar_output[2] = (uint8_t *)u_row_ptr;
                planar_pitch[0] = 0;
                planar_pitch[1] = 0;
                planar_pitch[2] = 0;

                if(decoder->apply_color_active_metadata)
                {
                    ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
                                             (unsigned char *)scanline2, width, output_pitch,
                                             COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags);
                    sptr = scanline2;
                    sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
                                               info->format, &whitebitdepth, &flags);
                }
                else
                {
                    ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
                                             (unsigned char *)scanline2, width, output_pitch,
                                             COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags);
                    sptr = scanline2;
                }

                ConvertLinesToOutput(decoder, width, 1, row, sptr,
                                     dst, output_pitch, format, whitebitdepth, flags);
                dst += output_pitch;
            }
        }
#endif
    }

    error = CODEC_ERROR_OKAY;
    return error;
}
// Reconstruct uncompressed DPX0 RGB format to the requested output format
// Reconstruct uncompressed DPX0/10-bit RGB source data to the requested output format.
//
// A fast path copies (or byte-reorders in place) the uncompressed chunk when
// the output is a 10-bit RGB packing at full resolution; otherwise the chunk
// is expanded one scanline at a time (threaded build: via the worker pool).
// Returns CODEC_ERROR_OKAY on success or CODEC_ERROR_MEMORY_ALLOC.
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;
    int resolution = info->resolution;

    error = CODEC_ERROR_UNSUPPORTED_FORMAT;

    // Fast path: 10-bit RGB packings at full resolution
    if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) &&
        resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
    {
        int smallest_Stride = output_pitch;
        int unc_Stride = decoder->uncompressed_size / height;
        if(unc_Stride < smallest_Stride)
            smallest_Stride = unc_Stride;

        // Convert the DPX0 chunk in place to the requested 10-bit packing
        if(format != DECODED_FORMAT_DPX0)
        {
            int unc_Stride = decoder->uncompressed_size / height;
            ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
        }

        if(unc_Stride == output_pitch)
            memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
        else
        {
            int y;
            uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
            uint8_t *dst = (uint8_t *)output_buffer;
            for(y=0; y<height; y++)
            {
                memcpy(dst, src, smallest_Stride);
                src += unc_Stride;
                dst += output_pitch;
            }
        }

        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    {
        // Expand RGB at the target resolution, and use the ActiveMetadata engine.
        // Need to allocate a scratch buffer for decoding the frame?
        if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
        {
            const size_t alignment = 16;
#if _ALLOCATOR
            ALLOCATOR *allocator = decoder->allocator;
#endif
            int orig_width = width;
            if(resolution == DECODED_RESOLUTION_HALF)
                orig_width *= 2;
            if(resolution == DECODED_RESOLUTION_QUARTER)
                orig_width *= 4;

            // Release any previous (undersized) scratch buffer
            if(decoder->RawBayer16)
            {
#if _ALLOCATOR
                FreeAligned(allocator, decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#else
                MEMORY_ALIGNED_FREE(decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#endif
            }

#if _ALLOCATOR
            decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
            decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
            assert(decoder->RawBayer16 != NULL);
            if (! (decoder->RawBayer16 != NULL)) {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            decoder->RawBayerSize = orig_width * 64;
        }
    }

    // Expand the source RGB one scanline at a time
    if(decoder->RawBayer16)
    {
#if _THREADED
        {
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
            if(decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            // Post a message to the mailbox
            mailbox->output = output_buffer;
            mailbox->pitch = output_pitch;
            memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
            mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
#else
        {
            int orig_width = width;
            int orig_height = height;
            int row,lines = 1;
            int start,end;
            // Fix: this output row pointer was referenced below without being
            // declared, which broke the non-threaded build
            uint8_t *dst = (uint8_t *)output_buffer;

            if(resolution == DECODED_RESOLUTION_HALF)
            {
                orig_width *= 2;
                orig_height *= 2;
                lines = 2;
            }
            if(resolution == DECODED_RESOLUTION_QUARTER)
            {
                orig_width *= 4;
                orig_height *= 4;
                lines = 4;
            }

            start = 0;
            end = height;
            if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit
            {
                start = height-1;
                end = -1;
            }

            for (row = start; row != end; end > start ? row++ : row--)
            {
                int whitebitdepth = 16;
                int flags = 0;
                ROI roi;
                PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
                PIXEL16U *scanline2 = scanline + orig_width * 8;
                unsigned short *sptr;
                int i,unc_Stride = decoder->uncompressed_size / orig_height;

                whitebitdepth = 13;
                if(decoder->apply_color_active_metadata)
                    flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
                else
                    flags = 0;

                roi.width = width;
                roi.height = 1;

                if(lines == 1)
                {
                    uint16_t *in16;
                    uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
                    PIXEL16U *ptr = (PIXEL16U *)scanline;
                    lptr += row * (unc_Stride>>2);
                    in16 = (uint16_t *)lptr;
                    for(i=0; i<width;i+=8)
                    {
                        int val,r,g,b;
                        if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
                        {
                            if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
                            {
                                for(j=0; j<8; j++)
                                {
                                    ptr[j] = in16[0] >> 3;
                                    ptr[j+8] = in16[1] >> 3;
                                    ptr[j+16] = in16[2] >> 3;
                                    in16 += 3;
                                }
                            }
                            else
                            {
                                // Unpack big-endian 10-bit DPX words to 13-bit planar samples
                                for(j=0; j<8; j++)
                                {
                                    val = SwapInt32(*lptr++);
                                    val >>= 2;
                                    b = (val & 0x3ff) << 3;
                                    val >>= 10;
                                    g = (val & 0x3ff) << 3;
                                    val >>= 10;
                                    r = (val & 0x3ff) << 3;
                                    ptr[j] = r;
                                    ptr[j+8] = g;
                                    ptr[j+16] = b;
                                }
                            }
                        }
                        else
                        {
                            if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
                            {
                                for(j=0; j<8*3; j+=3)
                                {
                                    ptr[j] = in16[0] >> 3;
                                    ptr[j+1] = in16[1] >> 3;
                                    ptr[j+2] = in16[2] >> 3;
                                    in16 += 3;
                                }
                            }
                            else
                            {
                                // Unpack big-endian 10-bit DPX words to 13-bit interleaved samples
                                for(j=0; j<8*3; j+=3)
                                {
                                    val = SwapInt32(*lptr++);
                                    val >>= 2;
                                    b = (val & 0x3ff) << 3;
                                    val >>= 10;
                                    g = (val & 0x3ff) << 3;
                                    val >>= 10;
                                    r = (val & 0x3ff) << 3;
                                    ptr[j] = r;
                                    ptr[j+1] = g;
                                    ptr[j+2] = b;
                                }
                            }
                        }
                        ptr += 24;
                    }
                }
                else if(lines == 2)
                {
                    // Fix: the chunk pointer was cast to (uint32_t), truncating
                    // the pointer on 64-bit builds; cast to (uint32_t *) instead
                    uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
                    PIXEL16U *ptr = (PIXEL16U *)scanline;
                    lptr += row * (unc_Stride>>2) * lines;
                    for(i=0; i<width;i+=8)
                    {
                        int val,r,g,b;
                        for(j=0; j<8; j++)
                        {
                            // Average a 2x2 block of source pixels
                            val = SwapInt32(lptr[0]);
                            val >>= 2;
                            b = (val & 0x3ff) << 3;
                            val >>= 10;
                            g = (val & 0x3ff) << 3;
                            val >>= 10;
                            r = (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[1]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[unc_Stride>>2]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[(unc_Stride>>2)+1]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
                            {
                                ptr[j] = r>>2;
                                ptr[j+8] = g>>2;
                                ptr[j+16] = b>>2;
                            }
                            else
                            {
                                ptr[j*3] = r>>2;
                                ptr[j*3+1] = g>>2;
                                ptr[j*3+2] = b>>2;
                            }
                            lptr += lines;
                        }
                        ptr += 24;
                    }
                }
                else if(lines == 4)
                {
                    // Fix: same pointer-truncating cast as the lines == 2 case
                    uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
                    PIXEL16U *ptr = (PIXEL16U *)scanline;
                    lptr += row * (unc_Stride>>2) * lines;
                    for(i=0; i<width;i+=8)
                    {
                        int val,r,g,b;
                        for(j=0; j<8; j++)
                        {
                            // Average four samples from every other column/row
                            val = SwapInt32(lptr[0]);
                            val >>= 2;
                            b = (val & 0x3ff) << 3;
                            val >>= 10;
                            g = (val & 0x3ff) << 3;
                            val >>= 10;
                            r = (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[2]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[unc_Stride>>1]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            val = SwapInt32(lptr[(unc_Stride>>1)+2]);
                            val >>= 2;
                            b += (val & 0x3ff) << 3;
                            val >>= 10;
                            g += (val & 0x3ff) << 3;
                            val >>= 10;
                            r += (val & 0x3ff) << 3;

                            if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
                            {
                                ptr[j] = r>>2;
                                ptr[j+8] = g>>2;
                                ptr[j+16] = b>>2;
                            }
                            else
                            {
                                ptr[j*3] = r>>2;
                                ptr[j*3+1] = g>>2;
                                ptr[j*3+2] = b>>2;
                            }
                            lptr += lines;
                        }
                        ptr += 24;
                    }
                }

                sptr = scanline;
                if(decoder->apply_color_active_metadata)
                    sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2,
                                               info->format, &whitebitdepth, &flags);

                ConvertLinesToOutput(decoder, width, 1, row, sptr,
                                     dst, output_pitch, format, whitebitdepth, flags);
                dst += output_pitch;
            }
        }
#endif
    }

    error = CODEC_ERROR_OKAY;
    return error;
}
// Reconstruct Bayer format to the requested output format
// Dispatch Bayer frame reconstruction to the routine for the decoded resolution.
// Only the full demosaic resolutions are currently handled; every other
// resolution reports CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int resolution = info->resolution;

    // Switch to the subroutine for the requested resolution
    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL_DEBAYER:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        // Full demosaic reconstruction is the only supported path
        return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);

    case DECODED_RESOLUTION_FULL:
    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
    case DECODED_RESOLUTION_QUARTER:
    case DECODED_RESOLUTION_LOWPASS_ONLY:
        // Recognized resolutions whose Bayer reconstruction is not implemented here
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;

    default:
        // The decoded resolution is not supported by this routine
        assert(0);
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;
    }

    return error;
}
// Reconstruct Bayer encoded data to full resolution
//
// Allocates the scratch buffers used for decoding raw Bayer data (if they are
// not already allocated) and then dispatches on the requested output format.
// All output format conversions in this routine are currently disabled, so it
// returns CODEC_ERROR_UNSUPPORTED_FORMAT after ensuring the buffers exist.
//
// Returns CODEC_ERROR_UNSUPPORTED_FORMAT (all formats disabled) or
// CODEC_ERROR_MEMORY_ALLOC if a scratch buffer could not be allocated.
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int format = info->format;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}
		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		// Allocate the RGB filter buffer used by sharpening/color processing
		if (decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;

			// A fourth channel is needed when RGBA 4:4:4:4 content is
			// decoded to a format that carries alpha
			if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}

			// Record the actual allocation size.  (Fix: this previously
			// recorded frame_size*3 even when frame_size*4 was allocated
			// for the alpha case, understating the buffer capacity.)
			decoder->RGBFilterBufferSize = size;
		}
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		// Conversion to RGB32 is not implemented in this routine
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_FORMAT_RGB24:
		// Conversion to RGB24 is not implemented in this routine
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}
// Reconstruct Bayer encoded data and demosaic to full resolution
//
// Validates that the requested output format is supported, allocates the raw
// Bayer and RGB filter scratch buffers on first use, performs the threaded
// inverse spatial transform into the Bayer scratch buffer, and then posts a
// JOB_TYPE_OUTPUT message to the worker thread pool to demosaic into the
// caller's output buffer.  Requires _THREADED; otherwise returns
// CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int precision = codec->precision;
	int format = info->format;
	int width = info->width;

	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	int chroma_offset = decoder->codec.chroma_offset;

	// Reject output formats that the demosaic path cannot produce
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48:	//DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13:	//DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}
	if (error)
		return error;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}
		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		// Allocate the RGB filter buffer used by sharpening/color processing
		if (decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;

			// A fourth channel is needed when RGBA 4:4:4:4 content is
			// decoded to a format that carries alpha
			if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}

			// Record the actual allocation size.  (Fix: this previously
			// recorded frame_size*3 even when frame_size*4 was allocated
			// for the alpha case, understating the buffer capacity.)
			decoder->RGBFilterBufferSize = size;
		}
	}

#if _THREADED
	// Inverse transform all channels into rows of 16-bit Bayer data
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
		(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
		info, chroma_offset, precision);

	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc,
							 decoder);
		}
#endif
		// RGB24/RGB32 are stored bottom-up; switch to the inverted variants
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			// Demosaic resolutions double the output height
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}
// Reconstruct Bayer encoded data to half resolution
//
// Collects the lowpass band of each channel's wavelet for the requested
// frame and converts the planar Bayer data to the output format.  Only
// DECODED_FORMAT_RGB32 is supported; other formats report an error.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform_array = decoder->transform;
	int frame_width = info->width;
	int frame_height = info->height;

	// Lowpass bands in the wavelets corresponding to the output frame
	PIXEL16U *g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0];
	PIXEL16U *rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0];
	PIXEL16U *bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0];
	PIXEL16U *g2_plane = NULL;

	// Pitch of each plane
	int g1_pitch = transform_array[0]->wavelet[frame]->pitch;
	int rg_pitch = transform_array[1]->wavelet[frame]->pitch;
	int bg_pitch = transform_array[2]->wavelet[frame]->pitch;
	int g2_pitch = 0;

	// Half resolution decodes may skip the G1-G2 channel entirely //HACK
	if (transform_array[3]->wavelet[frame])
	{
		g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0];
		g2_pitch = transform_array[3]->wavelet[frame]->pitch;
	}

	if (info->format == DECODED_FORMAT_RGB32)
	{
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch,
								  frame_width, frame_height);
		return CODEC_ERROR_OKAY;
	}

	// No other output format is handled by this routine
	return CODEC_ERROR_UNSUPPORTED_FORMAT;
}
// Reconstruct Bayer encoded data to quarter resolution
//
// Placeholder: quarter-resolution Bayer reconstruction has not been
// implemented.  Asserts in debug builds and returns CODEC_ERROR_OKAY.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//TODO: Need to finish this routine
	assert(0);	// Not implemented

	return CODEC_ERROR_OKAY;
}
// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
//
// Entry point for converting decoded YUV 4:2:2 wavelet data into the caller's
// output buffer.  Dispatches on three axes: decoded resolution (half versus
// full/half-horizontal), scan type (progressive versus interlaced), and the
// requested output pixel format.  Most paths hand the work to a threaded
// inverse-transform routine and return CODEC_ERROR_OKAY immediately; any
// combination with no handler falls through to the final
// CODEC_ERROR_UNSUPPORTED_FORMAT return.
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
// NOTE(review): decoder is dereferenced here and in the initializers below
// BEFORE the NULL check further down; a NULL decoder would crash before the
// check is reached.  The check should be the first statement if it matters.
FRAME_INFO *info = &decoder->frame;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
//int decoded_width = 0;
//int decoded_height = 0;
int resolution = info->resolution;
int format = info->format;
//int color_space = decoder->frame.colorspace;
//TODO: Eliminate use of the chroma offset
int chroma_offset = decoder->codec.chroma_offset;
#if _THREADED
// Type of threaded inverse transform
//int type;
#endif
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
if (decoder == NULL) {
return CODEC_ERROR_INVALID_ARGUMENT;
}
//TODO: Split this routine into subroutines for progressive versus interlaced video
//TODO: Split progressive and interlaced routines into subroutines for each resolution
// Half-resolution decode: output comes straight from the lowpass bands
if(resolution == DECODED_RESOLUTION_HALF)
{
bool inverted = false;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
// RGB24/RGB32 outputs are stored bottom-up; switch to the inverted variants
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch;		// Start at the bottom row
pitch = NEG(pitch);					// Negate the pitch to go up
}
#endif
if(decoder->use_active_metadata_decoder)
{
// Active metadata path: post an output job to the worker thread pool
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->framenum = frame;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Phase 1 signals the workers to source from the lowpass bands
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
return CODEC_ERROR_OKAY;
#endif
}
else
{
// NOTE(review): these locals shadow the identically named variables in
// the enclosing scope (precision, transform_array, codec, num_channels).
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
int channel;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
// Gather the lowpass wavelet image for each channel
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
// Pack the lowpass bands directly into the output buffer
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
return CODEC_ERROR_OKAY;
}
// Was the video source interlaced or progressive?
if (progressive)
{
// The video source was progressive (the first transform was a spatial transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
// RGB24/RGB32 outputs are stored bottom-up; switch to the inverted variants
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch;		// Start at the bottom row
pitch = NEG(pitch);					// Negate the pitch to go up
}
#endif
/*if(decoder->use_active_metadata_decoder)
{
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV:	// computing the active metadata.
case DECODED_FORMAT_UYVY:
return CODEC_ERROR_OKAY;
break;
}
}*/
// NOTE(review): this mask is 0x7ffffff (27 bits) here but 0x7FFFFFFF
// (31 bits) in the RGB32 test further down -- confirm which is intended.
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
return CODEC_ERROR_OKAY;
#endif
}
break;
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
return CODEC_ERROR_OKAY;
#endif
}
break;
//Handle sizes that are smaller than the interim decode buffer //DAN20081222
case DECODED_FORMAT_CbYCrY_10bit_2_8:
// Split the output into upper (2-bit) and lower (8-bit) planes
decoder->upper_plane = output;
decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;
// Use the address and pitch of the lower plane
output = decoder->lower_plane;
pitch = decoder->frame.width * 2;
// Fall through and compute the inverse spatial transform
case DECODED_FORMAT_CbYCrY_16bit_2_14:
case DECODED_FORMAT_CbYCrY_16bit_10_6:
case DECODED_FORMAT_CbYCrY_8bit:
case DECODED_FORMAT_CbYCrY_16bit:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_V210:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalYUVStrip16sToYUVOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED:
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R408:
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_W13A:
// Fast path: RGB32 without active metadata decodes straight to packed RGB32
if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
{
#if _THREADED
TransformInverseSpatialThreadedYUV422ToBuffer(decoder,
frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
#elif 0
TransformInverseSpatialToBuffer(decoder, transform_array, frame,
num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#else
TransformInverseSpatialYUV422ToOutput(decoder, transform_array,
frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision,
InvertHorizontalStripYUV16sToPackedRGB32);
#endif
return CODEC_ERROR_OKAY;
}
#if _THREADED
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
// Decode to interim 16-bit rows, then convert rows to the output format
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame,
num_channels, output, pitch,
&info2, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
return CODEC_ERROR_OKAY;
}
#endif
break;
default:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
// else Return the error code for unsupported output format
break;
}
}
}
else
{
// The video source was interlaced (the first transform was a frame transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
bool inverted = false;
if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) {
//	info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch;		// Start at the bottom row
pitch = NEG(pitch);					// Negate the pitch to go up
}
#endif
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_NV12:
case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
case DECODED_FORMAT_V210:	// only supported with use_active_metadata_decoder
if(decoder->use_active_metadata_decoder)
{
// Interim decode goes into RGBFilterBuffer16; grow it if needed
int frame_size = info->width * info->height * 4;
if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
{
#if _ALLOCATOR
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
assert(decoder->RGBFilterBuffer16 != NULL);
if (! (decoder->RGBFilterBuffer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RGBFilterBufferSize = frame_size;
}
//TransformInverseSpatialUniversalThreadedToRow16u(
//	decoder, frame, num_channels,
//	(uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
//	info, chroma_offset, precision);
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4, info,
&decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
{
// Second pass: workers convert the interim rows to the output format
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 2; // yuv
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
return CODEC_ERROR_OKAY;
}
}
switch (format)
{
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_WP13: //DAN20110203 - missing
case DECODED_FORMAT_W13A: //DAN20110203 - missing
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_RGB32:	//32-bit format can fit the interim YR16 decode into
case DECODED_FORMAT_R408:	//the output buffer
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
//Old code converts 4:2:2 directly to RGBA (single threaded.)
//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
//							  info, &decoder->scratch, chroma_offset, precision);
#endif
return CODEC_ERROR_OKAY;
default:
// else Return the error code for unsupported output format
break;
}
}
}
// The output format is not supported by this routine
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
return error;
}
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &decoder->frame;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
//int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
//IMAGE *wavelet;
//int wavelet_width;
//int wavelet_height;
int decoded_width = 0;
int decoded_height = 0;
int resolution = info->resolution;
//int chroma_offset = decoder->codec.chroma_offset;
//int decoded_scale;
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
//TODO: Eliminate use of the chroma offset
if (decoder == NULL) {
return CODEC_ERROR_INVALID_ARGUMENT;
}
// This routine should only be called for progressive frames
assert(codec->progressive);
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) {
return CODEC_ERROR_OKAY;
}
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
return CODEC_ERROR_OKAY;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution)) {
return CODEC_ERROR_RESOLUTION;
}
// Compute the decoded width and height
ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
assert(decoded_width > 0 && decoded_height > 0);
if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
output += (info->height-1)*pitch;
pitch = -pitch;
}
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile) {
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
if (!(info->width >= decoded_width)) {
return CODEC_ERROR_FRAMESIZE;
}
// assert((info->height+7)/8 >= (decoded_height+7)/8);
// if (!(info->height+7)/8 >= (decoded_height+7)/8) {
// return CODEC_ERROR_FRAMESIZE;
// }
START(tk_convert);
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
int channel;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
int chroma_offset = decoder->codec.chroma_offset;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
}
else
// Quarter resolution
if (resolution == DECODED_RESOLUTION_QUARTER)
{
// Output quarter resolution for the two frame GOP
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
}
else
// Half resolution
if (resolution == DECODED_RESOLUTION_HALF)
{
IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
int precision = codec->precision;
int chroma_offset = 0;
int channel;
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->framenum = frame;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Get the first level wavelet in each channel
for (channel = 0; channel < num_channels; channel++)
{
wavelet_array[channel] = transform_array[channel]->wavelet[frame];
}
// Pack the pixels from the lowpass band in each channel into the output buffer
CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
info, chroma_offset, precision);
}
}
// Full resolution or half horizontal
else
{
int chroma_offset = 0;
int precision = codec->precision;
// Reconstruct the output frame from a full resolution decode
//assert(resolution == DECODED_RESOLUTION_FULL);
if(decoder->use_active_metadata_decoder)
{
int frame_size, channels = 3;
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
channels = 4;
frame_size = info->width * info->height * channels * 2;
if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
{
#if _ALLOCATOR
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
assert(decoder->RGBFilterBuffer16 != NULL);
if (! (decoder->RGBFilterBuffer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RGBFilterBufferSize = frame_size;
}
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
info, &decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
switch (info->format)
{
case DECODED_FORMAT_B64A:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2B64A);
#else
TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
break;
case DECODED_FORMAT_YU64: //TODO : Threading
TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
break;
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB24_INVERTED:
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP
TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
break;
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
break;
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2RG30);
#else
TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
break;
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
#else
TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision,
InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
break;
case DECODED_FORMAT_R408:
case DECODED_FORMAT_V408:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGBA2YUVA);
#else
assert(0);
#endif
break;
case DECODED_FORMAT_YR16:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YR16);
#else
assert(0);// missing non-threaded version
#endif
break;
case DECODED_FORMAT_V210:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2v210);
#else
assert(0);// missing non-threaded version
#endif
break;
case DECODED_FORMAT_CbYCrY_8bit: // DECODED_FORMAT_CT_UCHAR
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
#else
assert(0);// missing non-threaded version
#endif
break;
//TODO: Add code to handle other Avid pixel formats
case DECODED_FORMAT_CbYCrY_16bit: // DECODED_FORMAT_CT_SHORT
case DECODED_FORMAT_CbYCrY_10bit_2_8: // DECODED_FORMAT_CT_10Bit_2_8
case DECODED_FORMAT_CbYCrY_16bit_2_14: // DECODED_FORMAT_CT_SHORT_2_14
case DECODED_FORMAT_CbYCrY_16bit_10_6: // DECODED_FORMAT_CT_USHORT_10_6
assert(0);
break;
default:
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Invalid decoded format: %d\n", info->format);
}
#endif
assert(0);
error = CODEC_ERROR_INVALID_FORMAT;
break;
}
}
}
STOP(tk_convert);
return error;
}
// Convert 16-bit signed lowpass data into the requested output format
//
// Packs the lowpass band of each channel wavelet in image_array into the
// output buffer, dispatching on the decoded output format in the frame info.
// The computed shift scales the lowpass values up to 16-bit precision before
// conversion.  image_array is ordered G, R, B (, A) as shown by the channel
// indexing below.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
							   uint8_t *output_buffer, int32_t output_pitch,
							   FRAME_INFO *info, int chroma_offset,
							   int precision)
{
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int format = info->format;

	// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
	const int shift = 16 - precision - PRESCALE_LUMA;

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		inverted = true;
		// fallthrough -- these formats share the converter below; only the
		// inverted flag distinguishes them from the *_INVERTED variants
	case DECODED_FORMAT_RGB24_INVERTED:
	case DECODED_FORMAT_RGB32_INVERTED:
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_RG64: //WIP
		ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
								  output_pitch, format, inverted, shift, num_channels);
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		{
			// Channels are stored in G, R, B order
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];

			// NOTE(review): the tests below compare the decoded format against
			// COLOR_FORMAT_* constants rather than DECODED_FORMAT_* -- confirm
			// that the two enumerations use the same values for YUYV/UYVY.
			if (info->format == COLOR_FORMAT_YUYV)
			{
				ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
							   r_image->pitch, g_image->pitch, b_image->pitch,
							   output_buffer, output_pitch,
							   output_width, output_height, 14,
							   info->colorspace, info->format);
			}
			else if (info->format == COLOR_FORMAT_UYVY)
			{
				ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
								r_image->pitch, g_image->pitch, b_image->pitch,
								output_buffer, output_pitch,
								output_width, output_height, 14,
								info->colorspace, info->format);
			}
		}
		break;

	default:
		{
			int y;
			// Channels are stored in G, R, B (, A) order
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];
			// NOTE(review): image_array[3] is read unconditionally; assumes the
			// caller always provides a four-entry array -- confirm
			IMAGE *a_image = image_array[3];
			// Scratch row used to gather one planar scanline of all channels
			unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
			//unsigned short *scanline2 = scanline + output_width*3;
			uint8_t *newline = (uint8_t *)output_buffer;
			unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

			Rptr = (unsigned short *)r_image->band[0];
			Gptr = (unsigned short *)g_image->band[0];
			Bptr = (unsigned short *)b_image->band[0];

			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
			{
				// Four-channel path: copy one row of each plane (R, G, B, A)
				// into the scratch scanline, then convert that planar line
				Aptr = (unsigned short *)a_image->band[0];

				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);
					memcpy(scanline+info->width*3, Aptr, info->width*2);
					// Pitch is in bytes; the pointers are 16-bit, hence /2
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;
					Aptr += a_image->pitch/2;

					Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
											 newline, output_pitch, info->format, whitebitdepth, flags);
					newline += output_pitch;
				}
			}
			else
			{
				// Three-channel path: same scheme without the alpha plane
				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;

					memcpy(scanline, Rptr, info->width*2);
					memcpy(scanline+info->width, Gptr, info->width*2);
					memcpy(scanline+info->width*2, Bptr, info->width*2);
					Rptr += r_image->pitch/2;
					Gptr += g_image->pitch/2;
					Bptr += b_image->pitch/2;

					ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
										 newline, output_pitch, info->format, whitebitdepth, flags);
					newline += output_pitch;
				}
			}
		}
		//assert(0);
		break;
	}

	STOP(tk_convert);
}
#if _THREADED
// Threaded inverse transform using the new threads API
//
// Inverts the spatial wavelet transform for the frame at frame_index using the
// decoder's worker thread pool, writing the result into the output buffer.
// The pool is created lazily on first use when _DELAY_THREAD_START is enabled.
// NOTE(review): the output format is hard-coded to DECODED_FORMAT_RGB32 below,
// ignoring info->format -- confirm this is intentional (see the TODO).
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
												   uint8_t *output, int pitch, FRAME_INFO *info,
												   int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//TODO: Add support for more output formats
	int format = DECODED_FORMAT_RGB32;

	// The upper and lower spatial transforms only share the middle rows
	// (the height is rounded up to a multiple of eight rows, then halved)
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	// (a single mailbox shared by every thread in the pool)
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
	HorizontalInverseFilterOutputProc horizontal_filter_proc;

#if _DELAY_THREAD_START
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		// (thread count comes from the upper 16 bits of the capabilities word)
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Choose the correct inverse horizontal filter for the output format
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		horizontal_filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
		break;

	default:
		assert(0);
		return;
	}

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "All worker threads signalled done\n");
	}
#endif
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
//
// Same thread-pool protocol as the other threaded inverse transforms, but the
// horizontal filter is fixed to the 16-bit planar row packer, so the output
// buffer receives planar 16-bit rows rather than a packed display format.
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
													  uint8_t *output, int pitch, FRAME_INFO *info,
													  int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// The upper and lower spatial transforms only share the middle rows
	// (the height is rounded up to a multiple of eight rows, then halved)
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
	HorizontalInverseFilterOutputProc horizontal_filter_proc;
	horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar;

#if _DELAY_THREAD_START
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
//
// Generic variant of the threaded inverse transform: the caller supplies the
// inverse horizontal filter, so this routine only fills the shared mailbox
// and drives the worker thread pool through one wavelet job.
void TransformInverseSpatialUniversalThreadedToOutput(
	DECODER *decoder, int frame_index, int num_channels,
	uint8_t *output, int pitch, FRAME_INFO *info,
	int chroma_offset, int precision,
	HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// The upper and lower spatial transforms only share the middle rows
	// (the height is rounded up to a multiple of eight rows, then halved)
	int transform_height = (((info->height + 7) / 8) * 8) / 2;
	int middle_row_count = transform_height;

	// Data structure for passing information to the worker threads
	WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

	// Inverse horizontal filter that outputs the desired format
#if _DELAY_THREAD_START
	if(decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->worker_thread.pool,
						 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						 WorkerThreadProc,
						 decoder);
	}
#endif

	// Post a message to the mailbox
	mailbox->horizontal_filter_proc = horizontal_filter_proc;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;
	mailbox->jobType = JOB_TYPE_WAVELET;

	// Set the work count to the number of rows to process
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
	// Start the transform worker threads
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	// Wait for all of the worker threads to finish
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Routines for the worker threads that use the new threads API
//
// Worker-thread body for the threaded inverse spatial transform.  Each thread
// calls this with its own thread_index: the designated top/bottom threads
// handle the border rows with special filters, then every thread claims
// middle rows from the pool until no work remains.  The decoder scratch space
// is statically partitioned between the threads.
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
											int frame_index, int num_channels,
											uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
											int chroma_offset, int precision,
											HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Per-channel wavelet band pointers and pitches, gathered up front so the
	// row filters can process all channels at once
	PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
	PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
	PIXEL *highlow_band[CODEC_MAX_CHANNELS];
	PIXEL *highhigh_band[CODEC_MAX_CHANNELS];

	int lowlow_pitch[CODEC_MAX_CHANNELS];
	int lowhigh_pitch[CODEC_MAX_CHANNELS];
	int highlow_pitch[CODEC_MAX_CHANNELS];
	int highhigh_pitch[CODEC_MAX_CHANNELS];

	int channel_width[CODEC_MAX_CHANNELS];

	uint8_t *output_row_ptr;
	uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
	int plane_pitch[TRANSFORM_MAX_CHANNELS];

	int output_width = info->width;
	int output_height = info->height;
	int half_height = output_height/2;
	int luma_band_width;
	ROI strip;
	char *bufptr;
	int last_row;
	int last_display_row;
	int last_line;
	int channel;
	int row;
	int odd_display_lines = 0;
	THREAD_ERROR error;

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	//TODO: Replace uses of buffer variables with calls to the scratch space API

	// This version is for 16-bit pixels
	assert(sizeof(PIXEL) == 2);

	// Must have a valid inverse horizontal filter
	assert(horizontal_filter_proc != NULL);

	// Check for enough space in the local array allocations
	// assert(num_channels <= CODEC_NUM_CHANNELS);
	assert(num_channels <= TRANSFORM_MAX_CHANNELS);

	// Divide the buffer space between the four threads
	buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
	buffer += buffer_size * thread_index;

	// Round the buffer pointer up to the next cache line
	// NOTE(review): when buffer is already aligned this still reserves a full
	// cache line of slack -- harmless, but worth confirming it is intended
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
	bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);

	// Allocate buffer space for the output rows from each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the row width for this channel
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
		int width = wavelet->width;
		int height = wavelet->height;
		//int pitch = wavelet->pitch;
		size_t channel_buffer_size;

		// Compute the width and pitch for the output rows stored in this buffer
		// (two output rows per wavelet row, each twice the wavelet width)
		int buffer_width = 2 * width;
		int buffer_height = 2;
		int buffer_pitch = ALIGN16(buffer_width);

		// Compute the total allocation for this channel
		channel_buffer_size = buffer_height * buffer_pitch;

		// Check that there is enough space available
		assert(channel_buffer_size <= buffer_size);

		// Allocate the buffer for this channel
		plane_array[channel] = (uint8_t *)bufptr;

		// Remember the pitch for rows in this channel
		plane_pitch[channel] = buffer_pitch;

		// Advance the buffer pointer past the allocated space for this channel
		bufptr += channel_buffer_size;

		// Reduce the amount of space remaining in the buffer
		buffer_size -= channel_buffer_size;

		// The dimensions of the output image are the same as the luma channel
		// NOTE(review): last_row, last_display_row, odd_display_lines and
		// luma_band_width are only set here, so this routine assumes
		// num_channels >= 1 -- confirm callers guarantee that
		if (channel == 0)
		{
			strip.width = buffer_width;
			strip.height = buffer_height;
			last_row = height;
			//DAN20050606 Added to fix issue with non-div by 8 heihts.
			last_display_row = (info->height+1)/2; // DAN20090215 -- fix for odd display lines.
			odd_display_lines = info->height & 1;

			// Remember the width of the wavelet bands for luma
			luma_band_width = width;
		}

		// Save the bands per channel for routines that process all channels at once
		lowlow_band[channel] = wavelet->band[0];
		lowhigh_band[channel] = wavelet->band[1];
		highlow_band[channel] = wavelet->band[2];
		highhigh_band[channel] = wavelet->band[3];

		lowlow_pitch[channel] = wavelet->pitch;
		lowhigh_pitch[channel] = wavelet->pitch;
		highlow_pitch[channel] = wavelet->pitch;
		highhigh_pitch[channel] = wavelet->pitch;

		// Remember the width of the wavelet for this channel
		channel_width[channel] = width;
	}

	// Use the remaining buffer space for intermediate results
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
	buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);

	// Compute the index of the last middle row that this loop may process
	if (last_row == last_display_row)
	{
		last_line = half_height - 1;
	}
	else
	{
		last_line = half_height;
	}
	if(odd_display_lines)
		last_line++;

	if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
	{
		// Process the first row
		row = 0;
		output_row_ptr = output_buffer;

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
		}
#endif
		// Process the first row using special border filters for the top row
		InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
									   lowhigh_band, lowhigh_pitch,
									   highlow_band, highlow_pitch,
									   highhigh_band, highhigh_pitch,
									   output_row_ptr, output_pitch,
									   output_width, info->format, info->colorspace,
									   row, channel_width,
									   (PIXEL *)buffer, buffer_size,
									   precision,
									   horizontal_filter_proc);
	}

	if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || decoder->worker_thread.pool.thread_count == 1)
	{
		if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
		{
			int pitch = output_pitch;

			// Process the last row
			row = last_row - 1;

			// Halve the pitch for stacked-anamorphic 3D YUYV output
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
					pitch >>= 1;

			// Begin filling the last output row with results
			output_row_ptr = output_buffer + row * 2 * pitch;

#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// Process the last row using special border filters for the bottom row
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
					output_row_ptr -= output_pitch;

			InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
											  lowhigh_band, lowhigh_pitch,
											  highlow_band, highlow_pitch,
											  highhigh_band, highhigh_pitch,
											  output_row_ptr, output_pitch,
											  output_width, info->format, info->colorspace,
											  row, channel_width,
											  (PIXEL *)buffer, buffer_size,
											  precision, odd_display_lines,
											  horizontal_filter_proc);
		}
	}

	// Loop until all of the middle rows have been processed
	for (;;)
	{
		int work_index;
		int row;	// shadows the outer row variable on purpose

		// Wait for one row from each channel to process
		error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);

		// Is there another row to process?
		if (error == THREAD_ERROR_OKAY)
		{
			int pitch = output_pitch;

			// Compute the next row to process from the work index
			// (work index zero corresponds to the row after the top border)
			row = work_index + 1;

			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
					pitch >>= 1;

			// Compute the output row corresponding to this row index
			output_row_ptr = output_buffer + row * 2 * pitch;
		}
		else
		{
			// No more work to do
			return;
		}

		// Is the row inside the top and bottom border?
		if (0 < row && row < last_line)
		{
			int outputlines = 2;

#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// The final middle row of an odd-height frame emits only one line
			if(odd_display_lines && row==last_line-1)
			{
				outputlines = 1;
			}

			// Process the middle row using the normal wavelet filters
			InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
											  lowlow_band, lowlow_pitch,
											  lowhigh_band, lowhigh_pitch,
											  highlow_band, highlow_pitch,
											  highhigh_band, highhigh_pitch,
											  output_row_ptr, output_pitch,
											  output_width, info->format, info->colorspace,
											  row, channel_width,
											  (PIXEL *)buffer, buffer_size,
											  precision,
											  horizontal_filter_proc,
											  outputlines);
		}
	}
}
#endif //_THREADED
// Scan a tag/value bitstream for the first occurrence of findtag.
//
// Walks the stream segment by segment, normalizing optional (negated) tags and
// skipping the payload of sized chunks.  Returns true and stores the value in
// *retvalue when the tag is found; returns false when a trailer tag is reached,
// the data is exhausted, a chunk overruns the buffer, or a bad tag is seen.
bool GetTuplet(unsigned char *data, int datasize,
			   unsigned short findtag, unsigned short *retvalue)
{
	BITSTREAM bitstream;
	BITSTREAM *input;
	TAGVALUE pair;
	TAGWORD tag, value;
	bool found = false;
	int bad_tag = 0;

	InitBitstream(&bitstream);
	bitstream.lpCurrentWord = data;
	bitstream.nWordsUsed = datasize;
	input = &bitstream;

	do
	{
		int payload = 0;

		// Read the next tag/value pair from the bitstream
		pair = GetSegment(input);
		tag = pair.tuple.tag;
		value = pair.tuple.value;

		// Optional tags are stored negated; normalize before classifying
		if (tag < 0)
		{
			tag = NEG(tag);
		}

		// Compute the payload size (in 32-bit words) for sized chunks
		if (tag & 0x2000)
		{
			// Large chunk: low byte of the tag extends the 16-bit size
			payload = (value & 0xffff) + ((tag & 0xff) << 16);
		}
		else if (tag & 0x4000)
		{
			payload = (value & 0xffff);
		}
		else if (tag == CODEC_TAG_INDEX)
		{
			payload = (value & 0xffff);
		}

		if ((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || (tag & 0x6000))
		{
			int skip_payload = 1;
			bad_tag = 0;

			if (tag == (int)findtag)
			{
				// Found the requested tag
				*retvalue = value;
				found = true;
				break;
			}

			if ((tag & 0xff00) == 0x2200) //sample size
			{
				// Don't test against nWordsUsed, as we might only have read
				// enough data for the metadata
				payload = 0;
				skip_payload = 0;
			}
			if ((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip_payload = 1;
			}
			if ((tag & 0xff00) == 0x2100) //level
			{
				skip_payload = 0;
			}

			if (payload)
			{
				// Stop if the chunk extends past the available data
				if (payload * 4 > input->nWordsUsed || payload < 0)
				{
					break;
				}
				if (skip_payload)
				{
					input->lpCurrentWord += payload * 4;
					input->nWordsUsed -= payload * 4;
				}
			}
		}
		else
		{
			bad_tag = 1;
		}

	} while (tag != CODEC_TAG_GROUP_TRAILER &&
			 tag != CODEC_TAG_FRAME_TRAILER &&
			 input->nWordsUsed > 0 && !bad_tag);

	return found;
}
/*!
	Copied from metadata.cpp in the cedoc common directory

	Scan a tag/value bitstream for the first occurrence of findtag, exactly as
	GetTuplet does, but additionally return the bitstream position reached when
	the tag was found.  Returns NULL if the tag is not found or the input is
	empty.  The tag's value is stored through retvalue on success.
*/
uint8_t *GetTupletAddr(uint8_t *data,
					   int datasize,
					   uint16_t findtag,
					   int16_t *retvalue)
{
	unsigned char *ret = NULL;
	BITSTREAM myinput, *pinput;
	TAGVALUE segment;
	TAGWORD tag,value;
	int error = 0;

	if (data == NULL || datasize == 0) {
		return NULL;
	}

	// Initialize the bitstream by hand instead of calling InitBitstream
	//InitBitstream(&myinput);
	memset(&myinput, 0, sizeof(BITSTREAM));
	myinput.lpCurrentWord = data;
	myinput.nWordsUsed = datasize;
	myinput.nBitsFree = BITSTREAM_LONG_SIZE;
	pinput = &myinput;

	do
	{
		//BOOL optional = FALSE;
		bool optional = false;
		int chunksize = 0;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(pinput);
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Is this an optional tag?  (optional tags are stored negated)
		if (tag < 0)
		{
			tag = NEG(tag);
			//optional = TRUE;
			optional = true;
		}

		// Compute the payload size (in 32-bit words) for sized chunks
		if(tag & 0x2000)
		{
			// Large chunk: low byte of the tag extends the 16-bit size
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);
		}
		else if(tag & 0x4000)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else if(tag == CODEC_TAG_INDEX)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else
		{
			chunksize = 0;
		}

		if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
		{
			int skip = 1;
			error = 0;

			if(tag == (int)findtag)
			{
				// Found the tag: return the value and the current position
				*retvalue = value;
				ret = pinput->lpCurrentWord;
				break;
			}

			if((tag & 0xff00) == 0x2200) //sample size
			{
				chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
				skip = 0;
			}
			if((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip = 1;
			}
			if((tag & 0xff00) == 0x2100) //level
				skip = 0;

			if(chunksize)
			{
				// Stop if the chunk extends past the available data
				if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
				{
					break;
				}
				if(skip)
				{
					//unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
					pinput->lpCurrentWord += chunksize*4;
					pinput->nWordsUsed -= chunksize*4;
				}
			}
		}
		else
		{
			error = 1;
		}

	} while(tag != CODEC_TAG_GROUP_TRAILER &&
			tag != CODEC_TAG_FRAME_TRAILER &&
			pinput->nWordsUsed>0 && !error);

	return ret;
}
|
GB_unop__minv_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int16_int16)
// op(A') function: GB (_unop_tran__minv_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 16) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = GB_IMINV_SIGNED (x, 16) elementwise: Cx [p] = op (Ax [p]).
// Handles both the dense case (Ab == NULL) and the bitmap case, where only
// entries with Ab [p] set are computed.
GrB_Info GB (_unop_apply__minv_int16_int16)
(
    int16_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                int16_t x = Ax [k] ;
                Cx [k] = GB_IMINV_SIGNED (x, 16) ;
            }
        }
    }
    else
    {
        // dense case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int16_t x = Ax [k] ;
            Cx [k] = GB_IMINV_SIGNED (x, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose/apply loop lives in the shared template "GB_unop_transpose.c",
// which is specialized by the GB_* macros defined earlier in this generated
// file (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__minv_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // Operator disabled by compile-time controls; caller uses the generic case
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mat-mul_gpu.c | /*
This program performs matrix multiplication on the GPU with
dynamically allocated matrices.
Author: Gleison Souza Diniz Mendonça
Date: 04-01-2015
version 2.0
Run:
ipmacc mat-mul_gpu.c -o mat
./mat matrix-size
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define PERCENT_DIFF_ERROR_THRESHOLD 0.01
// Initialize matrices: fill both inputs with the same deterministic pattern
// (row index plus column index modulo 100) and zero both result matrices.
void init(float *a, float *b, float *c_cpu, float *c_gpu) {
  for (int row = 0; row < SIZE; ++row) {
    for (int col = 0; col < SIZE; ++col) {
      const int idx = row * SIZE + col;
      const float v = (float)row + col % 100; // row + (col mod 100)
      a[idx] = v;
      b[idx] = v;
      c_cpu[idx] = 0.0f;
      c_gpu[idx] = 0.0f;
    }
  }
}
/// matrix multiplication algorithm GPU
/// s = size of matrix
///
/// Computes c = a * b for SIZE x SIZE row-major matrices on the target
/// device: a and b are mapped to the device, c is mapped both ways.
/// Only the outer row loop is parallelized (collapse is commented out).
void mul_GPU(float *a, float *b, float *c) {
#pragma omp target map(to : a[ : SIZE *SIZE], b[0 : SIZE *SIZE]) \
    map(tofrom : c[ : SIZE *SIZE]) device(DEVICE_ID)
  {
#pragma omp parallel for // collapse(1)
    for (int i = 0; i < SIZE; ++i) {
      for (int j = 0; j < SIZE; ++j) {
        float sum = 0.0;
        // Dot product of row i of a with column j of b
        for (int k = 0; k < SIZE; ++k) {
          sum += a[i * SIZE + k] * b[k * SIZE + j];
        }
        c[i * SIZE + j] = sum;
      }
    }
  }
}
// Reference matrix multiply on the host: c = a * b for SIZE x SIZE
// row-major matrices.  Used to validate the GPU result.
void mul_CPU(float *a, float *b, float *c) {
  for (int row = 0; row < SIZE; ++row) {
    for (int col = 0; col < SIZE; ++col) {
      float acc = 0.0;
      // Dot product of row `row` of a with column `col` of b
      for (int k = 0; k < SIZE; ++k) {
        acc += a[row * SIZE + k] * b[k * SIZE + col];
      }
      c[row * SIZE + col] = acc;
    }
  }
}
/* Count entries whose CPU/GPU percent difference exceeds the threshold;
 * mismatches in the first 10 rows are echoed for debugging.  Returns the
 * number of failing entries. */
int compareResults(float *b_cpu, float *b_gpu) {
  int fail = 0;
  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      const float cpu = b_cpu[i * SIZE + j];
      const float gpu = b_gpu[i * SIZE + j];
      if (percentDiff(cpu, gpu) > PERCENT_DIFF_ERROR_THRESHOLD) {
        fail++;
        if (i < 10) {
          fprintf(stdout, "%f != %f \n", cpu, gpu);
        }
      }
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, fail);
  return fail;
}
/*
 * Driver: allocates four SIZE x SIZE matrices, runs the GPU multiply, and
 * (under RUN_TEST) checks it against the CPU reference.  Returns the number
 * of mismatching entries (0 on success).
 *
 * Fixes: allocations are now checked before use, and the calloc argument
 * order is the conventional (nmemb, size).
 */
int main(int argc, char *argv[]) {
  double t_start, t_end;
  int fail = 0;

  float *a = (float *)malloc(sizeof(float) * SIZE * SIZE);
  float *b = (float *)malloc(sizeof(float) * SIZE * SIZE);
  float *c_cpu = (float *)calloc((size_t)SIZE * SIZE, sizeof(float));
  float *c_gpu = (float *)calloc((size_t)SIZE * SIZE, sizeof(float));
  if (a == NULL || b == NULL || c_cpu == NULL || c_gpu == NULL) {
    fprintf(stderr, "allocation of %dx%d matrices failed\n", SIZE, SIZE);
    free(a);
    free(b);
    free(c_cpu);
    free(c_gpu);
    return EXIT_FAILURE;
  }

  init(a, b, c_cpu, c_gpu);

  fprintf(stdout, "<< Matrix Multiplication >>\n");

  t_start = rtclock();
  mul_GPU(a, b, c_gpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  mul_CPU(a, b, c_cpu);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(c_cpu, c_gpu);
#endif

  free(a);
  free(b);
  free(c_cpu);
  free(c_gpu);
  return fail;
}
|
matrix.c | //
// Created by Ayaz BADOURALY
//
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef WITH_LIBPNG
#include <png.h>
#endif
#include "file.h"
#include "matrix.h"
/*
 * Allocate a COO matrix with room for nnz triplets.  The rows/cols/values
 * arrays are left uninitialized; the caller owns the matrix and must release
 * it with destruct_coo_matrix().
 *
 * Fix: allocation failures were previously ignored; now the process exits
 * with a diagnostic, matching the error style of set_value_coo_matrix().
 */
struct COO_Matrix construct_coo_matrix ( const int nnz )
{
	struct COO_Matrix mtx;

	mtx.nnz = nnz;
	mtx.rows = malloc(mtx.nnz * sizeof(int));
	mtx.cols = malloc(mtx.nnz * sizeof(int));
	mtx.values = malloc(mtx.nnz * sizeof(double));

	/* malloc(0) may legitimately return NULL, so a NULL result is only
	 * fatal when memory was actually requested */
	if ( nnz > 0
			&& (mtx.rows == NULL || mtx.cols == NULL || mtx.values == NULL) ) {
		free(mtx.rows);
		free(mtx.cols);
		free(mtx.values);
		fprintf(stderr, "\nAn error occurred in construct_coo_matrix...\n");
		exit(EXIT_FAILURE);
	}

	return mtx;
}
struct COO_Matrix from_graphfile_coo_matrix ( const char *filename, int *nodes_number )
/*
 * we should use hashtable instead of mapping
 */
{
	/*
	 * Build the adjacency matrix of the graph stored in `filename`.
	 *  - returns the adjacency matrix in COO form
	 *  - stores the number of nodes (matrix dimension) in *nodes_number
	 *  - writes the node-name <-> index mapping to mapping.txt in OUTPUT
	 *
	 * Expected format: optional '#' comment lines, one line holding the
	 * node count, then one "from to" arrow per line.
	 *
	 * Fixes: mapping entries are sized from the actual string (was
	 * sizeof(char *), a heap overflow for long names), the initial `line`
	 * buffer leak is gone, mapping entries are freed, and only the entries
	 * that were actually filled are printed/freed.
	 */
	/***********************************************************/
	bool nodes_number_known = false;
	char *line = NULL, *arrow_from = NULL, *arrow_to = NULL,
		**mapping = NULL;
	FILE *stream = NULL;
	int known_nodes_number = 0, arrow_from_index = 0, arrow_to_index = 0;
	struct COO_Matrix mtx = construct_coo_matrix(0);
	/***********************************************************/

	stream = fopen(filename, "r");
	if ( stream == NULL ) {
		file_opening_failure(filename);
	}

	/* `line` always comes from get_line(); pre-allocating it here (as the
	 * code previously did) only leaked that buffer */
	arrow_from = malloc(buffer_size * sizeof(char));
	arrow_to = malloc(buffer_size * sizeof(char));

	/*
	 * start reading file line by line
	 */
	while ( (line = get_line(stream, filename)) != NULL )
	{
		/*
		 * skiping commented line
		 */
		if ( strncmp(line, "#", 1) == 0 ) {
			continue;
		}

		/*
		 * parsing line
		 * cases are : first line ( number of nodes ) / others lines ( arrows )
		 */
		if ( ! nodes_number_known ) {
			sscanf(line, "%d", nodes_number);
			/* calloc keeps unused slots NULL in case the file announces
			 * more nodes than appear in the edge list */
			mapping = calloc(*nodes_number, sizeof(char *));
			nodes_number_known = true;
		}
		else {
			arrow_from_index = -1;
			arrow_to_index = -1;
			sscanf(line, "%s %s", arrow_from, arrow_to);

			/*
			 * get indexes from mapping
			 */
			for ( int i = 0 ; i < known_nodes_number ; ++i ) {
				if ( strcmp(arrow_from, mapping[i]) == 0 ) {
					arrow_from_index = i;
				}
				if ( strcmp(arrow_to, mapping[i]) == 0 ) {
					arrow_to_index = i;
				}
				if ( arrow_from_index != -1 && arrow_to_index != -1 ) {
					break;
				}
			}
			if ( arrow_from_index == -1 ) {
				arrow_from_index = known_nodes_number;
				/* BUG FIX: was malloc(sizeof(arrow_from)) — the size of a
				 * pointer — so the strcpy below overflowed for any name
				 * longer than sizeof(char *) - 1 */
				mapping[arrow_from_index] = malloc(strlen(arrow_from) + 1);
				strcpy(mapping[arrow_from_index], arrow_from);
				++known_nodes_number;
			}
			if ( strcmp(arrow_to, arrow_from) == 0 ) {
				arrow_to_index = arrow_from_index;
			}
			else if ( arrow_to_index == -1 ) {
				arrow_to_index = known_nodes_number;
				mapping[arrow_to_index] = malloc(strlen(arrow_to) + 1);
				strcpy(mapping[arrow_to_index], arrow_to);
				++known_nodes_number;
			}

			/*
			 * adding arrow to the matrix
			 */
			set_value_coo_matrix(&mtx, arrow_to_index, arrow_from_index, 1);
		}
	}
	fclose(stream);
	free(line);
	free(arrow_from);
	free(arrow_to);

	/*
	 * saving mapping table in a file
	 */
	stream = fopen(get_file_path("mapping.txt", OUTPUT), "w");
	if ( stream == NULL ) {
		file_opening_failure(filename);
	}
	/* only known_nodes_number slots were filled; iterating to *nodes_number
	 * dereferenced uninitialized pointers when the declared count was larger */
	for ( int i = 0 ; i < known_nodes_number ; ++i ) {
		fprintf(stream, "index = %d\t->\tnode name = \"%s\"\n", i, mapping[i]);
	}
	fclose(stream);

	for ( int i = 0 ; i < known_nodes_number ; ++i ) {
		free(mapping[i]);
	}
	free(mapping);

	return mtx;
}
/*
 * Return the stored value at (row,col), or 0 when no entry exists there
 * (COO stores explicit entries only).  Linear scan over the nnz triplets.
 */
double get_value_coo_matrix ( const struct COO_Matrix *mtx, const int row, const int col )
{
	int i = 0;

	while ( i < mtx->nnz ) {
		if ( row == mtx->rows[i] && col == mtx->cols[i] ) {
			return mtx->values[i];
		}
		++i;
	}
	return 0;
}
/*
 * Insert `value` at (row,col), keeping the triplets sorted by (row,col).
 * Returns EXIT_SUCCESS when the value is stored (inserted or overwritten),
 * EXIT_FAILURE when value is 0 (explicit zeros are never stored).
 * Exits the process on allocation failure.
 *
 * NOTE(review): every insertion reallocates and copies all nnz entries, so
 * building a matrix one entry at a time is O(nnz^2) overall.
 */
int set_value_coo_matrix ( struct COO_Matrix *mtx, const int row, const int col, const double value )
{
	/*
	 * return EXIT_SUCCESS if value was added to matrix
	 * return EXIT_FAILURE if value is 0 or if an error occured
	 */
	/***********************************************************/
	double *new_values = NULL;
	int i = 0, new_nnz = 0,
		*new_rows = NULL, *new_cols = NULL;
	/***********************************************************/

	if ( value == 0 ) {
		return EXIT_FAILURE;
	}

	/* room for the existing entries plus the new one */
	new_nnz = mtx->nnz + 1;
	new_rows = malloc(new_nnz * sizeof(int));
	new_cols = malloc(new_nnz * sizeof(int));
	new_values = malloc(new_nnz * sizeof(double));
	if ( new_rows == NULL
			|| new_cols == NULL
			|| new_values == NULL ) {
		free(new_rows);
		free(new_cols);
		free(new_values);
		fprintf(stderr, "\nAn error occurred in set_value_coo_matrix...\n");
		exit(EXIT_FAILURE);
	}

	/* copy entries that sort before (row,col); stop early on an exact
	 * match (overwrite in place) or at the insertion point */
	for ( i = 0 ; i < mtx->nnz ; ++i ) {
		if ( mtx->rows[i] == row && mtx->cols[i] == col ) {
			/*
			 * there already is a value at (row,col) coordinates
			 * we put the new value
			 */
			mtx->values[i] = value;
			free(new_rows);
			free(new_cols);
			free(new_values);
			return EXIT_SUCCESS;
		}
		else if (( mtx->rows[i] == row && mtx->cols[i] > col )
				|| mtx->rows[i] > row ) {
			new_rows[i] = row;
			new_cols[i] = col;
			new_values[i] = value;
			break; /* ending for loop */
		}
		else
		{
			new_rows[i] = mtx->rows[i];
			new_cols[i] = mtx->cols[i];
			new_values[i] = mtx->values[i];
		}
	}

	if ( i == mtx->nnz ) {
		/*
		 * value hasn't been added to new_values
		 * so we append it at the end
		 */
		new_rows[new_nnz - 1] = row;
		new_cols[new_nnz - 1] = col;
		new_values[new_nnz - 1] = value;
	}
	else {
		/*
		 * value has been added at index i
		 * so we append the last indexes of mtx to new_
		 */
		memcpy(&new_rows[i + 1], &mtx->rows[i], (mtx->nnz - i)*sizeof(int));
		memcpy(&new_cols[i + 1], &mtx->cols[i], (mtx->nnz - i)*sizeof(int));
		memcpy(&new_values[i + 1], &mtx->values[i], (mtx->nnz - i)*sizeof(double));
	}

	/*
	 * updating mtx
	 */
	destruct_coo_matrix(mtx);
	mtx->rows = new_rows;
	mtx->cols = new_cols;
	mtx->values = new_values;
	mtx->nnz = new_nnz;

	return EXIT_SUCCESS;
}
/* prototype matches matrix.h; repeated here for clarity of the fix below */
double get_value_coo_matrix ( const struct COO_Matrix *mtx, const int row, const int col );

int sum_coo_matrix ( struct COO_Matrix *mtx_dest, const struct COO_Matrix *mtx_src )
{
	/*
	 * this function operate : mtx_dest = mtx_dest + mtx_src
	 * return EXIT_FAILURE if an error occured
	 *
	 * BUG FIX: the previous code read mtx_dest->values[i], i.e. dest's i-th
	 * stored entry, which in general belongs to a different (row,col) than
	 * src's i-th entry.  The destination value must be looked up at the
	 * same coordinates.
	 *
	 * NOTE(review): set_value_coo_matrix() reports EXIT_FAILURE for a zero
	 * value, so a sum that cancels to 0 is reported as failure — this
	 * preserves the existing contract but may be surprising.
	 */
	int status;

	for ( int i = 0 ; i < mtx_src->nnz ; ++i )
	{
		const int row = mtx_src->rows[i];
		const int col = mtx_src->cols[i];
		const double dest_value = get_value_coo_matrix(mtx_dest, row, col);

		status = set_value_coo_matrix(mtx_dest, row, col,
				dest_value + mtx_src->values[i]);
		if ( status == EXIT_FAILURE ) {
			return EXIT_FAILURE;
		}
	}
	return EXIT_SUCCESS;
}
int copy_coo_matrix ( struct COO_Matrix *mtx_dest, const struct COO_Matrix *mtx_src )
{
	/*
	 * this function operate : mtx_dest = mtx_src
	 * return EXIT_FAILURE if an error occured
	 */
	for ( int i = 0 ; i < mtx_src->nnz ; ++i )
	{
		const int r = mtx_src->rows[i];
		const int c = mtx_src->cols[i];

		if ( set_value_coo_matrix(mtx_dest, r, c, mtx_src->values[i])
				== EXIT_FAILURE ) {
			return EXIT_FAILURE;
		}
	}
	return EXIT_SUCCESS;
}
/*
 * Dump the three COO arrays (rows, cols, values) to stdout, ';'-separated,
 * one bracketed list per array.
 */
void print_coo_matrix ( const struct COO_Matrix *mtx )
{
	const int last = mtx->nnz - 1;

	printf("Row indexes : [");
	for ( int i = 0 ; i < mtx->nnz ; ++i ) {
		printf(" %d%s", mtx->rows[i], ( i == last ) ? "" : " ;");
	}
	printf(" ]\n");

	printf("Col indexes : [");
	for ( int i = 0 ; i < mtx->nnz ; ++i ) {
		printf(" %d%s", mtx->cols[i], ( i == last ) ? "" : " ;");
	}
	printf(" ]\n");

	printf("\nVal indexes : [");
	for ( int i = 0 ; i < mtx->nnz ; ++i ) {
		printf(" %.6f%s", mtx->values[i], ( i == last ) ? "" : " ;");
	}
	printf(" ]\n");

	return;
}
/*
 * Render the sparsity pattern of mtx as a grayscale PNG: stored entries are
 * white (0xff), empty cells black.  Returns EXIT_FAILURE for an empty
 * matrix, EXIT_SUCCESS otherwise.
 *
 * NOTE(review): libpng is used unconditionally here but <png.h> is only
 * included when WITH_LIBPNG is defined — verify the build always defines
 * it, or guard this function the same way.
 */
int draw_coo_matrix ( const struct COO_Matrix *mtx, const char *filename )
{
	int dim = 0;

	if ( mtx->nnz == 0 ) {
		return EXIT_FAILURE;
	}

	/*
	 * we can find out he dimensions of the square matrix
	 * by calculating max of (mtx->rows,mtx->cols)
	 */
	dim = mtx->rows[mtx->nnz - 1]; /* mtx.rows is ordered */
	for ( int i = 0 ; i < mtx->nnz ; ++i ) {
		dim = mtx->cols[i] > dim ? mtx->cols[i] : dim;
	}
	++dim; /* indexes are 0-based, so dimension = max index + 1 */

	/************************************************************/
	/*
	 * opening file in binary mode for writing
	 */
	FILE *stream = fopen(filename, "wb");
	if (stream == NULL) {
		file_opening_failure(filename);
	}

	/*
	 * initializing write structure
	 */
	/* NOTE(review): on the libpng failures below the code only prints a
	 * message and keeps going with a NULL pointer — it should bail out. */
	png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
	if (png_ptr == NULL) {
		fprintf(stderr, "Error while allocating write struct\n");
	}

	/*
	 * initializing info structure
	 */
	png_infop info_ptr = png_create_info_struct(png_ptr);
	if (info_ptr == NULL) {
		fprintf(stderr, "Error while allocating info struct\n");
	}

	/*
	 * setting error handling
	 */
	if (setjmp(png_jmpbuf(png_ptr))) {
		fprintf(stderr, "Error while creating png\n");
	}

	png_init_io(png_ptr, stream);

	/*
	 * writing header (1 bit colour depth)
	 */
	/* NOTE(review): the bit depth actually written below is 8, not 1 as
	 * the comment above says */
	png_set_IHDR(png_ptr, info_ptr, dim, dim,
			8, PNG_COLOR_TYPE_GRAY, PNG_INTERLACE_NONE,
			PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
	png_write_info(png_ptr, info_ptr);

	/*
	 * creating the image row by row
	 */
	png_bytep row = (png_bytep) malloc(dim * sizeof(png_byte));
	for ( int i = 0 ; i < dim ; ++i ) {
		/* each pixel lookup is an O(nnz) scan; pixels of one row are
		 * computed in parallel, rows are written sequentially */
		#pragma omp parallel for
		for ( int j = 0 ; j < dim ; ++j ) {
			if ( get_value_coo_matrix(mtx, i, j) ) {
				row[j] = 0xff;
			}
			else {
				row[j] = 0x00;
			}
		}
		if ( verbose ) {
			printf("Writing line %d\n", i);
		}
		png_write_row(png_ptr, row);
	}

	/*
	 * end of writing
	 */
	png_write_end(png_ptr, info_ptr);
	free(row);
	png_destroy_write_struct(&png_ptr, &info_ptr);
	fclose(stream);

	return EXIT_SUCCESS;
}
/*
 * Release the three triplet arrays and reset the pointers so a later
 * destruct (or set_value_coo_matrix) cannot double-free them.
 */
void destruct_coo_matrix ( struct COO_Matrix *mtx )
{
	free(mtx->rows);
	free(mtx->cols);
	free(mtx->values);

	mtx->rows = NULL;
	mtx->cols = NULL;
	mtx->values = NULL;
}
|
mm.c | /*
* Programa testado no PARCODE
*
* Sequencial:
* real 1m56.258s
* user 1m56.081s
* sys 0m0.080s
*
*
* Paralelo:
* real 0m37.209s
* user 2m28.415s
* sys 0m0.140s
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * c = a * b for square, row-major matrices of dimension `width`.
 * Rows are distributed across threads; every (row,col) output is an
 * independent dot product, so no synchronization is needed.
 */
void mm(double *a, double *b, double *c, int width)
{
#pragma omp parallel for schedule(guided)
    for (int row = 0; row < width; row++)
    {
        for (int col = 0; col < width; col++)
        {
            double acc = 0;
            for (int k = 0; k < width; k++)
            {
                acc += a[row * width + k] * b[k * width + col];
            }
            c[row * width + col] = acc;
        }
    }
}
/*
 * Driver: builds two 2000x2000 matrices (a[i][j] = i, b[i][j] = j),
 * multiplies them in parallel, and releases the buffers.
 *
 * Fixes: allocations are checked before use, the matrices are freed, and
 * main returns an explicit status.
 */
int main()
{
    int width = 2000;
    double *a = (double *)malloc(width * width * sizeof(double));
    double *b = (double *)malloc(width * width * sizeof(double));
    double *c = (double *)malloc(width * width * sizeof(double));

    /* ~32 MB per matrix: fail loudly instead of dereferencing NULL */
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "matrix allocation failed\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < width; i++)
    {
#pragma omp simd
        for (int j = 0; j < width; j++)
        {
            a[i * width + j] = i;
            b[i * width + j] = j;
            c[i * width + j] = 0;
        }
    }

    mm(a, b, c, width);

    // for(int i = 0; i < width; i++) {
    //   for(int j = 0; j < width; j++) {
    //     printf("\n c[%d][%d] = %f",i,j,c[i*width+j]);
    //   }
    // }

    free(a);
    free(b);
    free(c);
    return 0;
}
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/VectorTransform.h"
#include "faiss/IndexHNSW.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "table/field_range_index.h"
#include "common/gamma_common_data.h"
#include "gamma_index_flat.h"
#include "gamma_scanner.h"
#include "util/log.h"
#include "vector/memory_raw_vector.h"
#include "vector/raw_vector.h"
#include "realtime/realtime_invert_index.h"
#include "index/retrieval_model.h"
#include "util/utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
  size_t nrefine;         // nb of refines (IVFPQR)

  size_t n_hamming_pass;  // nb of passed Hamming distance tests (for polysemous)

  // timings measured with the CPU RTC
  // on all threads
  size_t search_cycles;
  size_t refine_cycles;   // only for IVFPQR

  IndexIVFPQStats() { reset(); }

  // Fix: reset() was an empty body, so the counters were never initialized
  // and reads of the global stats object were indeterminate.  Zero them,
  // matching the upstream faiss behavior.
  void reset() {
    nrefine = 0;
    n_hamming_pass = 0;
    search_cycles = 0;
    refine_cycles = 0;
  }
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
// Read the CPU timestamp counter via RDTSC (x86-64 only); returns 0 on
// other architectures.  Used only for coarse cycle accounting through the
// TIC/TOC macros below — not a wall-clock time source.
static uint64_t get_cycles() {
#ifdef __x86_64__
  uint32_t high, low;
  asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
  return ((uint64_t)high << 32) | (low);
#else
  return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
 * IndexIVFPQ. The code contains a lot of branches, depending on:
 * - metric_type: are we computing L2 or Inner product similarity?
 * - by_residual: do we encode raw vectors or residuals?
 * - use_precomputed_table: are x_R|x_C tables precomputed?
 * - polysemous_ht: are we filtering with polysemous codes?
 */
struct QueryTables {
  /*****************************************************
   * General data from the IVFPQ
   *****************************************************/

  const faiss::IndexIVFPQ &ivfpq;
  // NOTE(review): `params` is never set in the constructor's init list, so
  // it holds an indeterminate value — confirm nothing reads it.
  const faiss::IVFSearchParameters *params;

  // copied from IndexIVFPQ for easier access
  int d;
  const faiss::ProductQuantizer &pq;
  faiss::MetricType metric_type;
  bool by_residual;
  int use_precomputed_table;
  int polysemous_ht;

  // pre-allocated data buffers
  float *sim_table, *sim_table_2;
  float *residual_vec, *decoded_vec;

  // single data buffer
  std::vector<float> mem;

  // for table pointers
  std::vector<const float *> sim_table_ptrs;

  explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
                       const faiss::IVFSearchParameters *params,
                       faiss::MetricType metric_type)
      : ivfpq(ivfpq),
        d(ivfpq.d),
        pq(ivfpq.pq),
        metric_type(metric_type),
        by_residual(ivfpq.by_residual),
        use_precomputed_table(ivfpq.use_precomputed_table) {
    // carve the four working buffers out of one allocation:
    // [sim_table | sim_table_2 | residual_vec | decoded_vec]
    mem.resize(pq.ksub * pq.M * 2 + d * 2);
    sim_table = mem.data();
    sim_table_2 = sim_table + pq.ksub * pq.M;
    residual_vec = sim_table_2 + pq.ksub * pq.M;
    decoded_vec = residual_vec + d;

    // for polysemous
    polysemous_ht = ivfpq.polysemous_ht;
    if (auto ivfpq_params =
            dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
      polysemous_ht = ivfpq_params->polysemous_ht;
    }
    if (polysemous_ht != 0) {
      q_code.resize(pq.code_size);
    }
    init_list_cycles = 0;
    sim_table_ptrs.resize(pq.M);
  }

  /*****************************************************
   * What we do when query is known
   *****************************************************/

  // field specific to query
  const float *qi;

  // query-specific intialization
  void init_query(const float *qi) {
    this->qi = qi;
    if (metric_type == faiss::METRIC_INNER_PRODUCT)
      init_query_IP();
    else
      init_query_L2();
    // for non-residual encoding the polysemous code of the query itself
    // can already be computed here
    if (!by_residual && polysemous_ht != 0) pq.compute_code(qi, q_code.data());
  }

  void init_query_IP() {
    // precompute some tables specific to the query qi
    pq.compute_inner_prod_table(qi, sim_table);
  }

  void init_query_L2() {
    if (!by_residual) {
      pq.compute_distance_table(qi, sim_table);
    } else if (use_precomputed_table) {
      // inner products with sub-centroids; combined with the precomputed
      // table per list in precompute_list_tables_L2()
      pq.compute_inner_prod_table(qi, sim_table_2);
    }
  }

  /*****************************************************
   * When inverted list is known: prepare computations
   *****************************************************/

  // fields specific to list
  long key;          // id of the current inverted list (coarse centroid)
  float coarse_dis;  // distance of the query to that centroid
  std::vector<uint8_t> q_code;

  uint64_t init_list_cycles;  // cycles spent in precompute_* (TIC/TOC)

  /// once we know the query and the centroid, we can prepare the
  /// sim_table that will be used for accumulation
  /// and dis0, the initial value
  float precompute_list_tables() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        dis0 = precompute_list_tables_IP();
      else
        dis0 = precompute_list_tables_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  float precompute_list_table_pointers() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        FAISS_THROW_MSG("not implemented");
      else
        dis0 = precompute_list_table_pointers_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  /*****************************************************
   * compute tables for inner prod
   *****************************************************/

  float precompute_list_tables_IP() {
    // prepare the sim_table that will be used for accumulation
    // and dis0, the initial value
    ivfpq.quantizer->reconstruct(key, decoded_vec);
    // decoded_vec = centroid
    float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);

    if (polysemous_ht) {
      // polysemous codes are computed on the residual of the query
      // w.r.t. the centroid
      for (int i = 0; i < d; i++) {
        residual_vec[i] = qi[i] - decoded_vec[i];
      }
      pq.compute_code(residual_vec, q_code.data());
    }
    return dis0;
  }

  /*****************************************************
   * compute tables for L2 distance
   *****************************************************/

  float precompute_list_tables_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 0 || use_precomputed_table == -1) {
      // no precomputed tables: compute the residual's distance table
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_distance_table(residual_vec, sim_table);

      if (polysemous_ht != 0) {
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 1) {
      // flat quantizer: sim_table = precomputed[key] - 2 * sim_table_2
      dis0 = coarse_dis;

      faiss::fvec_madd(pq.M * pq.ksub,
                       &ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
                       sim_table_2, sim_table);

      if (polysemous_ht != 0) {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 2) {
      // MultiIndexQuantizer: the list key packs one sub-index per
      // coarse sub-quantizer (cpq.nbits bits each)
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;  // fine sub-quantizers per coarse sub-quantizer

      const float *qtab = sim_table_2;  // query-specific table
      float *ltab = sim_table;          // (output) list-specific table

      long k = key;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        // compute PQ index
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        // get corresponding table
        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];

        if (polysemous_ht == 0) {
          // sum up with query-specific table
          faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
          ltab += Mf * pq.ksub;
          qtab += Mf * pq.ksub;
        } else {
          // also derive the polysemous query code as the argmin of
          // each combined sub-table
          for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
            q_code[m] =
                faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
            pc += pq.ksub;
            ltab += pq.ksub;
            qtab += pq.ksub;
          }
        }
      }
    }
    return dis0;
  }

  // Like precompute_list_tables_L2() but only records pointers into the
  // precomputed tables (sim_table_ptrs) instead of materializing sim_table.
  float precompute_list_table_pointers_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 1) {
      dis0 = coarse_dis;

      const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
      for (size_t m = 0; m < pq.M; m++) {
        sim_table_ptrs[m] = s;
        s += pq.ksub;
      }
    } else if (use_precomputed_table == 2) {
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;

      long k = key;
      int m0 = 0;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        const float *pc =
            &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];

        for (int m = m0; m < m0 + Mf; m++) {
          sim_table_ptrs[m] = pc;
          pc += pq.ksub;
        }
        m0 += Mf;
      }
    } else {
      FAISS_THROW_MSG("need precomputed tables");
    }

    if (polysemous_ht) {
      FAISS_THROW_MSG("not implemented");
      // Not clear that it makes sense to implemente this,
      // because it costs M * ksub, which is what we wanted to
      // avoid with the tables pointers.
    }

    return dis0;
  }
};
// Accumulates scan results into a caller-owned k-best heap ordered by the
// comparator C (CMax for L2, CMin for inner product in faiss conventions).
template <class C>
struct KnnSearchResults {
  idx_t key;         // current inverted-list id, used to synthesize ids
  const idx_t *ids;  // vector ids of the scanned list (may be nullptr)

  // heap params
  size_t k;          // heap capacity (number of results kept)
  float *heap_sim;   // similarities/distances, heap-ordered by C
  idx_t *heap_ids;   // ids parallel to heap_sim
  size_t nup;        // number of heap updates performed so far

  // Push (j, dis) if it beats the current worst heap entry.  When ids is
  // null, the id is encoded as (list key << 32 | offset j), the faiss
  // "store_pairs" convention.
  inline void add(idx_t j, float dis) {
    if (C::cmp(heap_sim[0], dis)) {
      faiss::heap_pop<C>(k, heap_sim, heap_ids);
      idx_t id = ids ? ids[j] : (key << 32 | j);
      faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
      nup++;
    }
  }
};
/*****************************************************
 * Scaning the codes.
 * The scanning functions call their favorite precompute_*
 * function to precompute the tables they need.
 *****************************************************/
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
  const uint8_t *list_codes;
  const IDType *list_ids;
  size_t list_size;

  explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
                         const faiss::IVFSearchParameters *params)
      : QueryTables(ivfpq, params, METRIC_TYPE) {
    // these scan loops assume exactly one byte per PQ sub-code
    FAISS_THROW_IF_NOT(pq.nbits == 8);
  }

  // distance contribution shared by all codes of the current list
  float dis0;

  // mode 2: full tables; mode 1: table pointers only; any other mode
  // leaves dis0 unset (callers then use scan_on_the_fly_dist)
  void init_list(idx_t list_no, float coarse_dis, int mode) {
    this->key = list_no;
    this->coarse_dis = coarse_dis;

    if (mode == 2) {
      dis0 = precompute_list_tables();
    } else if (mode == 1) {
      dis0 = precompute_list_table_pointers();
    }
  }

  /// tables are not precomputed, but pointers are provided to the
  /// relevant X_c|x_r tables
  template <class SearchResultType>
  void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
                              SearchResultType &res) const {
    for (size_t j = 0; j < ncode; j++) {
      float dis = dis0;
      const float *tab = sim_table_2;

      for (size_t m = 0; m < pq.M; m++) {
        int ci = *codes++;
        dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
        tab += pq.ksub;
      }
      res.add(j, dis);
    }
  }

  /// nothing is precomputed: access residuals on-the-fly
  template <class SearchResultType>
  void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    const float *dvec;
    float dis0 = 0;

    if (by_residual) {
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        ivfpq.quantizer->reconstruct(key, residual_vec);
        dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
      } else {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      }
      dvec = residual_vec;
    } else {
      dvec = qi;
      dis0 = 0;
    }

    for (size_t j = 0; j < ncode; j++) {
      // decode each PQ code back to a full vector, then compute the
      // exact distance to it
      pq.decode(codes, decoded_vec);
      codes += pq.code_size;

      float dis;
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
      } else {
        dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
      }
      res.add(j, dis);
    }
  }

  /*****************************************************
   * Scanning codes with polysemous filtering
   *****************************************************/

  template <class HammingComputer, class SearchResultType>
  void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
                               SearchResultType &res) const {
    int ht = ivfpq.polysemous_ht;
    size_t n_hamming_pass = 0;

    int code_size = pq.code_size;

    HammingComputer hc(q_code.data(), code_size);

    for (size_t j = 0; j < ncode; j++) {
      const uint8_t *b_code = codes;
      // cheap Hamming test first; the PQ distance is only computed for
      // codes that pass the threshold
      int hd = hc.hamming(b_code);
      if (hd < ht) {
        n_hamming_pass++;

        float dis = dis0;
        const float *tab = sim_table;

        for (size_t m = 0; m < pq.M; m++) {
          dis += tab[*b_code++];
          tab += pq.ksub;
        }
        res.add(j, dis);
      }
      codes += code_size;
    }
// global stats counter is shared across scanning threads
#pragma omp critical
    { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
  }

  // Dispatch to a HammingComputer specialized for the code size.
  template <class SearchResultType>
  void scan_list_polysemous(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs)                                               \
  case cs:                                                                 \
    scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
        ncode, codes, res);                                                \
    break
      HANDLE_CODE_SIZE(4);
      HANDLE_CODE_SIZE(8);
      HANDLE_CODE_SIZE(16);
      HANDLE_CODE_SIZE(20);
      HANDLE_CODE_SIZE(32);
      HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
      default:
        if (pq.code_size % 8 == 0)
          scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
              ncode, codes, res);
        else
          scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
              ncode, codes, res);
        break;
    }
  }
};
/* struct GammaInvertedListScanner : faiss::InvertedListScanner { */
/* GammaInvertedListScanner() { retrieval_context_ = nullptr; } */
/* virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes, */
/* const idx_t *ids, float *heap_sim, */
/* idx_t *heap_ids, size_t k) = 0; */
/* void set_search_context(RetrievalContext *retrieval_context) { */
/* this->retrieval_context_ = retrieval_context; */
/* } */
/* RetrievalContext *retrieval_context_; */
/* }; */
// Flat (uncompressed) list scanner with gamma-specific filtering:
// deleted-id masking, per-request validity checks, and score filtering.
template <faiss::MetricType metric, class C>
struct GammaIVFFlatScanner : GammaInvertedListScanner {
  size_t d;  // vector dimensionality
  GammaIVFFlatScanner(size_t d) : d(d) {}

  const float *xi;  // current query vector (not owned)
  void set_query(const float *query) override { this->xi = query; }

  idx_t list_no;  // current inverted list
  void set_list(idx_t list_no, float /* coarse_dis */) override {
    this->list_no = list_no;
  }

  // exact distance between the query and a raw float vector stored as code
  float distance_to_code(const uint8_t *code) const override {
    const float *yj = (float *)code;
    float dis = metric == faiss::METRIC_INNER_PRODUCT
                    ? faiss::fvec_inner_product(xi, yj, d)
                    : faiss::fvec_L2sqr(xi, yj, d);
    return dis;
  }

  // NOTE(review): `codes` is reinterpreted as a RawVector*, not a code
  // array — this scanner must only be fed that layout by its gamma
  // caller; confirm the contract at the call sites.
  inline size_t scan_codes(size_t list_size, const uint8_t *codes,
                           const idx_t *ids, float *simi, idx_t *idxi,
                           size_t k) const override {
    RawVector *raw_vec = (RawVector *)codes;
    size_t nup = 0;
    for (size_t j = 0; j < list_size; j++) {
      // skip ids flagged as deleted, then recover the real vector id
      if (ids[j] & realtime::kDelIdxMask) continue;
      idx_t vid = ids[j] & realtime::kRecoverIdxMask;
      if (vid < 0) continue;
      // per-request filter (numeric ranges, bitmaps, ...)
      if (retrieval_context_->IsValid(vid) == false) continue;
      ScopeVector svec;
      raw_vec->GetVector(vid, svec);
      const float *yj = (const float *)svec.Get();
      float dis = metric == faiss::METRIC_INNER_PRODUCT
                      ? faiss::fvec_inner_product(xi, yj, d)
                      : faiss::fvec_L2sqr(xi, yj, d);

      if (retrieval_context_->IsSimilarScoreValid(dis) && C::cmp(simi[0], dis)) {
        faiss::heap_pop<C>(k, simi, idxi);
        faiss::heap_push<C>(k, simi, idxi, dis, vid);
        nup++;
      }
    }
    return nup;
  }

  // pointer-based scanning is not supported by the flat scanner
  size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                            const idx_t *ids, float *heap_sim, idx_t *heap_ids,
                            size_t k) {
    return 0;
  }
};
// Search-time knobs for the IVFPQ retrieval model.
class IVFPQRetrievalParameters : public RetrievalParameters {
 public:
  IVFPQRetrievalParameters() : RetrievalParameters() {
    parallel_on_queries_ = true;
    recall_num_ = 100;
    nprobe_ = -1;  // -1 means "use the index default"
  }

  IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe,
                           enum DistanceComputeType type) {
    parallel_on_queries_ = parallel_on_queries;
    recall_num_ = recall_num;
    nprobe_ = nprobe;
    distance_compute_type_ = type;
  }

  IVFPQRetrievalParameters(enum DistanceComputeType type) {
    parallel_on_queries_ = true;
    recall_num_ = 100;
    nprobe_ = -1;
    distance_compute_type_ = type;
  }

  virtual ~IVFPQRetrievalParameters() {}

  // number of candidates kept from the PQ coarse stage before the
  // fine-grained rerank
  int RecallNum() { return recall_num_; }
  void SetRecallNum(int recall_num) { recall_num_ = recall_num; }

  // number of inverted lists probed per query
  int Nprobe() { return nprobe_; }
  void SetNprobe(int nprobe) { nprobe_ = nprobe; }

  bool ParallelOnQueries() { return parallel_on_queries_; }
  void SetParallelOnQueries(bool parallel_on_queries) {
    parallel_on_queries_ = parallel_on_queries;
  }

 protected:
  // parallelize over queries or ivf lists
  bool parallel_on_queries_;
  int recall_num_;
  int nprobe_;
};
struct IVFPQModelParams;
// Real-time IVFPQ index: faiss::IndexIVFPQ extended with incremental
// add/update/delete, per-request filtering, optional OPQ transform, and
// dump/load support.  Definitions live in the corresponding .cc file.
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
  GammaIVFPQIndex();

  virtual ~GammaIVFPQIndex();

  faiss::InvertedListScanner *get_InvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaIVFFlatScanner(
      size_t d, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  int Init(const std::string &model_parameters, int indexing_size) override;

  RetrievalParameters *Parse(const std::string &parameters) override;

  int Indexing() override;

  bool Add(int n, const uint8_t *vec);

  int Update(const std::vector<int64_t> &ids,
             const std::vector<const uint8_t *> &vecs);

  // assign the vectors, then call search_preassign
  int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x,
             int k, float *distances, idx_t *labels);

  void search_preassigned(RetrievalContext *retrieval_context, int n,
                          const float *x, const float *applied_x, int k,
                          const idx_t *keys, const float *coarse_dis,
                          float *distances, idx_t *labels, int nprobe,
                          bool store_pairs,
                          const faiss::IVFSearchParameters *params = nullptr);

  long GetTotalMemBytes() override {
    if (!rt_invert_index_ptr_) {
      return 0;
    }
    return rt_invert_index_ptr_->GetTotalMemBytes();
  }

  int Dump(const std::string &dir) override;

  int Load(const std::string &index_dir) override;

  virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1,
                              idx_t a2) const;

  int Delete(const std::vector<int64_t> &ids);

  void train(int64_t n, const float *x) { faiss::IndexIVFPQ::train(n, x); }

  int indexed_vec_count_;  // vectors already folded into the index
  realtime::RTInvertIndex *rt_invert_index_ptr_;
  bool compaction_;
  size_t compact_bucket_no_;
  uint64_t compacted_num_;
  uint64_t updated_num_;
  int d_;
  DistanceComputeType metric_type_;
  faiss::VectorTransform *opq_;  // optional OPQ rotation (may be null)
  // 0 is FlatL2, 1 is HNSWFlat
  int quantizer_type_;
#ifdef PERFORMANCE_TESTING
  std::atomic<uint64_t> search_count_;
  int add_count_;
#endif
  IVFPQModelParams *model_param_;
};
// Gamma-aware PQ list scanner: adds deleted-id masking and per-request
// filtering on top of the faiss IVFPQ scan loops in IVFPQScannerT.
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
                           GammaInvertedListScanner {
  const GammaIVFPQIndex &gamma_ivfpq_;
  bool store_pairs_;  // report (list << 32 | offset) instead of real ids

  GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs)
      : IVFPQScannerT<idx_t, METRIC_TYPE>(gamma_ivfpq, nullptr),
        gamma_ivfpq_(gamma_ivfpq) {
    store_pairs_ = store_pairs;
  }

  // Table-driven scan with filtering: deleted ids and ids rejected by the
  // retrieval context are skipped, but their codes are still consumed so
  // the code stream stays aligned with the id array.
  template <class SearchResultType>
  void scan_list_with_table(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    size_t j = 0;
    for (; j < ncode; j++) {
      if (res.ids[j] & realtime::kDelIdxMask) {
        codes += this->pq.M;
        continue;
      }
      if (!retrieval_context_->IsValid(res.ids[j] &
                                       realtime::kRecoverIdxMask)) {
        codes += this->pq.M;
        continue;
      }
      float dis = this->dis0;
      const float *tab = this->sim_table;

      for (size_t m = 0; m < this->pq.M; m++) {
        dis += tab[*codes++];
        tab += this->pq.ksub;
      }
      res.add(j, dis);
    }
    assert(j == ncode);
  }

  inline void set_query(const float *query) override {
    this->init_query(query);
  }

  inline void set_list(idx_t list_no, float coarse_dis) override {
    this->init_list(list_no, coarse_dis, precompute_mode);
  }

  // distance of a single code; only valid when full tables are precomputed
  inline float distance_to_code(const uint8_t *code) const override {
    assert(precompute_mode == 2);
    float dis = this->dis0;
    const float *tab = this->sim_table;

    for (size_t m = 0; m < this->pq.M; m++) {
      dis += tab[*code++];
      tab += this->pq.ksub;
    }
    return dis;
  }

  // NOTE(review): unlike the faiss base scanner, this always returns 0
  // rather than the number of heap updates — confirm callers ignore it.
  inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
                           float *heap_sim, idx_t *heap_ids,
                           size_t k) const override {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};

    if (this->polysemous_ht > 0) {
      assert(precompute_mode == 2);
      this->scan_list_polysemous(ncode, codes, res);
    } else if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else if (precompute_mode == 1) {
      this->scan_list_with_pointer(ncode, codes, res);
    } else if (precompute_mode == 0) {
      this->scan_on_the_fly_dist(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }

  // NOTE(review): `codes` is uint8_t** here but scan_list_with_table takes
  // a uint8_t* — this member only compiles if it is never instantiated;
  // verify whether scan_codes_pointer is actually used for this scanner.
  inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                                   const idx_t *ids, float *heap_sim,
                                   idx_t *heap_ids, size_t k) {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};

    if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
};
} // namespace tig_gamma
#endif
|
grad_traj_optimizer.h | #ifndef _GRAD_TRAJ_OPTIMIZER_H_
#define _GRAD_TRAJ_OPTIMIZER_H_
#include <Eigen/Eigen>
#include <nlopt.hpp>
// just use to get signed distance field
#include <ros/ros.h>
// sdf_tools
#include "sdf_tools/collision_map.hpp"
#include "sdf_tools/sdf.hpp"
#include "qp_generator.h"
#define GDTB getDistanceToBoundary
#define OPT_INITIAL_TRY 0
#define OPT_FIRST_STEP 1
#define OPT_SECOND_STEP 2
using namespace std;
// Gradient-based trajectory optimizer: refines a piecewise-quintic
// trajectory through given waypoints by minimizing smoothness, collision,
// velocity and acceleration costs with NLopt, querying a signed distance
// field for obstacle information.
class GradTrajOptimizer
{
public:
  GradTrajOptimizer(ros::NodeHandle &node, const vector<Eigen::Vector3d> &waypoints);

  // Run one optimization stage; step is OPT_INITIAL_TRY (0),
  // OPT_FIRST_STEP (1) or OPT_SECOND_STEP (2). Always returns true.
  bool optimizeTrajectory(int step);

  // Copy out the current polynomial coefficients, one segment per row.
  void getCoefficient(Eigen::MatrixXd &coeff);

  // Copy out the per-segment time allocation.
  void getSegmentTime(Eigen::VectorXd &T);

  // Install the SDF used for collision queries, plus its grid resolution.
  void setSignedDistanceField(sdf_tools::SignedDistanceField *sdf, double resolution);

private:
  /** signed distance field */
  sdf_tools::SignedDistanceField *sdf;

  /** virtual boundary, 6x1 vector, for min x max x... min z,max z */
  mutable Eigen::VectorXd boundary;

  /** coefficient of polynomials*/
  Eigen::MatrixXd coeff;

  /** important matrix and variables*/
  Eigen::MatrixXd A;
  Eigen::MatrixXd C;
  Eigen::MatrixXd L;           // maps stacked derivatives to coefficients
  Eigen::MatrixXd R;           // smoothness cost matrix (cost = d' R d)
  Eigen::MatrixXd Rff;
  Eigen::MatrixXd Rpp;
  Eigen::MatrixXd Rpf;
  Eigen::MatrixXd Rfp;
  Eigen::VectorXd Time;        // current per-segment durations
  Eigen::VectorXd origin_time; // initial durations, kept for retries
  Eigen::MatrixXd V;           // maps the power basis to its derivative
  Eigen::MatrixXd Df;          // fixed end-point derivatives (3 x num_df)
  Eigen::MatrixXd Dp;          // free derivatives being optimized (3 x num_dp)
  Eigen::MatrixXd origin_dp;   // initial free derivatives, kept for retries
  Eigen::MatrixXd initial_dp;
  Eigen::MatrixXd path;        // waypoints, one per row
  int num_dp;                  // # free derivative variables per axis
  int num_df;                  // # fixed derivative variables per axis
  int num_point;               // # waypoints (segments + 1)
  mutable int iter_num = 0;        // cost-function evaluations so far
  mutable double total_time = 0.0; // accumulated cost-evaluation time (s)
  int step = 1;                // current optimization stage
  double resolution;           // SDF grid resolution
  int algorithm;               // NLopt algorithm id (from param server)
  double time_limit_1;         // NLopt time budget for step 1
  double time_limit_2;         // NLopt time budget for step 2
  double try_limit;            // NLopt time budget for the initial try
  double offset;               // margin added to the virtual boundary box
  double deltat;
  double bos;                  // position bound offset around each waypoint
  double vos;                  // velocity bound for free derivatives
  double aos;                  // acceleration bound for free derivatives
  double gd_value;             // fixed gradient magnitude when gd_type == 2
  int gd_type;
  double retry_offset;         // random perturbation radius used in retries

  /** dynamics parameter from param server*/
  double w_smooth;             // smoothness cost weight
  double w_collision;          // collision cost weight
  double w_collision_temp;     // stash of w_collision during retries
  double d0;                   // safe distance in the collision penalty
  double r;                    // collision penalty decay rate
  double alpha;                // collision penalty magnitude
  double v0;                   // velocity penalty threshold
  double rv;
  double alphav;
  double a0;                   // acceleration penalty threshold
  double ra;
  double alphaa;
  double sgm_time;             // minimum segment duration
  double init_time;            // extra time for the first/last segment
  double mean_v;               // mean velocity used for time allocation

  /** optimizer*/
  nlopt::opt optimizer;

  /** main computation function,get smoothness, collision ,velocity ,accleration cost and gradient*/
  void getCostAndGradient(std::vector<double> dp, double &cost, std::vector<double> &grad) const;

  /** cost function of optimization */
  static double costFunc(const std::vector<double> &x, std::vector<double> &grad, void *func_data);

  /** convert derivatives of end points to polynomials coefficient */
  void getCoefficientFromDerivative(Eigen::MatrixXd &coeff, const std::vector<double> &_dp) const;

  /** get distance and gradient in signed distance field ,by position query*/
  void getDistanceAndGradient(Eigen::Vector3d &pos, double &dist, Eigen::Vector3d &grad) const;

  // Evaluate position / velocity / acceleration of segment `index` at
  // `time` from the stacked coefficient matrix.
  void getPositionFromCoeff(Eigen::Vector3d &pos, const Eigen::MatrixXd &coeff, const int &index,
                            const double &time) const;
  void getVelocityFromCoeff(Eigen::Vector3d &vel, const Eigen::MatrixXd &coeff, const int &index,
                            const double &time) const;
  void getAccelerationFromCoeff(Eigen::Vector3d &acc, const Eigen::MatrixXd &coeff,
                                const int &index, const double &time) const;

  /** penalty and gradient */
  void getDistancePenalty(const double &distance, double &cost) const;
  void getDistancePenaltyGradient(const double &distance, double &grad) const;
  void getVelocityPenalty(const double &distance, double &cost) const;
  void getVelocityPenaltyGradient(const double &vel, double &grad) const;
  void getAccelerationPenalty(const double &distance, double &cost) const;
  void getAccelerationPenaltyGradient(const double &acc, double &grad) const;

  // Row vector [1, t, t^2, ..., t^5] of the polynomial basis at time t.
  void getTimeMatrix(const double &t, Eigen::MatrixXd &T) const;

  // Clamp n into [min, max].
  void constrains(double &n, double min, double max) const;

  // True if any waypoint lies on or outside the current virtual boundary.
  bool pathOutsideBoundary() const;

  // Rebuild the virtual boundary as the waypoint bounding box +/- offset.
  void createNewVirtualBoundary() const;

  // Distance from (x,y,z) to the nearest face of the virtual boundary.
  double getDistanceToBoundary(const double &x, const double &y, const double &z) const;

  // Finite-difference gradient of the boundary distance. (Name keeps the
  // original misspelling since callers depend on it.)
  void recaluculateGradient(const double &x, const double &y, const double &z,
                            Eigen ::Vector3d &grad) const;

  // Perturb segment times and initial values after a failed attempt.
  void tryDifferentParameter();
};
// Construct the optimizer: read all tuning parameters from the ROS param
// server, allocate an initial time profile for the waypoint path, and
// build the QP matrices (A, C, L, R, ...) via TrajectoryGenerator.
GradTrajOptimizer::GradTrajOptimizer(ros::NodeHandle &node,
                                     const vector<Eigen::Vector3d> &way_points)
{
  boundary = Eigen::VectorXd::Zero(6);

  //-------------------------get parameter from server--------------------
  ros::param::get("/traj_opti_node1/alg", this->algorithm);
  ros::param::get("/traj_opti_node1/time_limit_1", this->time_limit_1);
  ros::param::get("/traj_opti_node1/time_limit_2", this->time_limit_2);
  ros::param::get("/traj_opti_node1/try_limit", this->try_limit);
  ros::param::get("/traj_opti_node1/offset", this->offset);
  ros::param::get("/traj_opti_node1/dt", this->deltat);
  ros::param::get("/traj_opti_node1/retry_offset", this->retry_offset);
  ros::param::get("/traj_opti_node1/ws", this->w_smooth);
  ros::param::get("/traj_opti_node1/wc", this->w_collision);
  // w_collision_temp starts equal to w_collision (same "wc" key); it backs
  // up the weight while tryDifferentParameter() zeroes it
  ros::param::get("/traj_opti_node1/wc", this->w_collision_temp);
  ros::param::get("/traj_opti_node1/alpha", this->alpha);
  ros::param::get("/traj_opti_node1/r", this->r);
  ros::param::get("/traj_opti_node1/d0", this->d0);
  ros::param::get("/traj_opti_node1/alphav", this->alphav);
  ros::param::get("/traj_opti_node1/rv", this->rv);
  ros::param::get("/traj_opti_node1/v0", this->v0);
  ros::param::get("/traj_opti_node1/alphaa", this->alphaa);
  ros::param::get("/traj_opti_node1/ra", this->ra);
  ros::param::get("/traj_opti_node1/a0", this->a0);
  ros::param::get("/traj_opti_node1/bos", this->bos);
  ros::param::get("/traj_opti_node1/vos", this->vos);
  ros::param::get("/traj_opti_node1/aos", this->aos);
  ros::param::get("/traj_opti_node1/gd_value", this->gd_value);
  ros::param::get("/traj_opti_node1/gd_type", this->gd_type);
  ros::param::get("/traj_opti_node1/segment_time", sgm_time);
  ros::param::get("/traj_opti_node1/mean_v", mean_v);
  ros::param::get("/traj_opti_node1/init_time", init_time);

  //------------------------generate optimization dependency------------------
  path = Eigen::MatrixXd::Zero(way_points.size(), 3);
  for(int i = 0; i < way_points.size(); ++i)
    path.row(i) = way_points[i].transpose();

  // time allocation: proportional to segment length but never below
  // sgm_time; first and last segments get extra time (start/stop from rest)
  Time = Eigen::VectorXd::Zero(way_points.size() - 1);
  for(int i = 0; i < (way_points.size() - 1); ++i)
  {
    double len = (path.row(i) - path.row(i + 1)).norm();
    Time(i) = max(len / mean_v, sgm_time);
    if(i == 0 || i == way_points.size() - 2)
      Time(i) += init_time;
  }
  origin_time = Time;

  Eigen::Vector3d vel, acc;
  vel.setZero();
  acc.setZero();
  int type = 2;
  TrajectoryGenerator generator;
  coeff = generator.PolyQPGeneration(path, vel, acc, Time, type);
  generator.StackOptiDep();
  R = generator.getR();
  Rff = generator.getRff();
  Rpp = generator.getRpp();
  Rpf = generator.getRpf();
  Rfp = generator.getRfp();
  L = generator.getL();
  A = generator.getA();
  C = generator.getC();

  std::pair<Eigen::MatrixXd, Eigen::MatrixXd> d = generator.getInitialD();
  initial_dp = origin_dp = Dp = d.first;
  Df = d.second;

  // BUG FIX: Eigen's resize() leaves coefficients uninitialized; the old
  // code only wrote the superdiagonal, so the rest of V held garbage that
  // leaked into every T * V * Ldp product in getCostAndGradient().
  // Zero-initialize before filling the differentiation superdiagonal.
  V = Eigen::MatrixXd::Zero(6, 6);
  for(int i = 0; i < 5; ++i)
    V(i, i + 1) = i + 1;

  num_dp = Dp.cols();
  num_df = Df.cols();
  num_point = Time.rows() + 1;
}
// Install the signed distance field used for collision queries and record
// its grid resolution (used by the boundary-gradient finite differences).
void GradTrajOptimizer::setSignedDistanceField(sdf_tools::SignedDistanceField *s, double res)
{
  this->sdf = s;
  this->resolution = res;
}
// True if any waypoint lies on or beyond any face of the virtual boundary
// box (boundary = [min_x, max_x, min_y, max_y, min_z, max_z]).
bool GradTrajOptimizer::pathOutsideBoundary() const
{
  for(int row = 0; row < path.rows(); ++row)
  {
    for(int axis = 0; axis < 3; ++axis)
    {
      const double v = path(row, axis);
      if(v <= boundary(2 * axis) || v >= boundary(2 * axis + 1))
        return true;
    }
  }
  return false;
}
// Rebuild the virtual boundary as the axis-aligned bounding box of the
// waypoint path, enlarged by `offset` on every face. The boundary keeps
// the optimizer from pushing the trajectory arbitrarily far away.
void GradTrajOptimizer::createNewVirtualBoundary() const
{
  // BUG FIX: the old code used `static double os = this->offset;`, which
  // latched the offset of the *first* call forever -- wrong for multiple
  // optimizer instances or a changed parameter. Use a plain local instead.
  const double os = this->offset;

  // path is (num_waypoints x 3); take per-column extrema instead of the
  // hand-rolled min/max scan
  for(int axis = 0; axis < 3; ++axis)
  {
    boundary(2 * axis) = path.col(axis).minCoeff() - os;
    boundary(2 * axis + 1) = path.col(axis).maxCoeff() + os;
  }
}
void GradTrajOptimizer::constrains(double &n, double min, double max) const
{
if(n > max)
n = max;
else if(n < min)
n = min;
}
// After a failed/poor optimization attempt: re-seed the RNG, jitter the
// segment times and the initial free-derivative guess, and temporarily
// zero the collision weight (stashed in w_collision_temp so it can be
// restored later), so the next pass optimizes smoothness only.
void GradTrajOptimizer::tryDifferentParameter()
{
  // seed from the wall clock so successive retries differ
  srand(ros::Time::now().toNSec() % int(2e7));

  //--------------------- adjust polynomials segment time ----------------
  // one uniform jitter in [-0.5, 0.5) seconds, applied to every segment
  double rdt = -0.5 + 1.0 * rand() / double(RAND_MAX);
  for(int i = 0; i < this->Time.rows(); ++i)
  {
    Time(i) = origin_time(i) + rdt;
  }

  //--------------------- adjust optimization inital value ----------------
  // Dp columns come in (pos, vel, acc) triplets; only the position entries
  // (i % 3 == 0) are perturbed, uniformly within +/- retry_offset per axis
  for(int i = 0; i < num_dp; ++i)
  {
    if(i % 3 == 0)
    {
      Dp(0, i) = origin_dp(0, i) - retry_offset + 2 * retry_offset * rand() / double(RAND_MAX);
      Dp(1, i) = origin_dp(1, i) - retry_offset + 2 * retry_offset * rand() / double(RAND_MAX);
      Dp(2, i) = origin_dp(2, i) - retry_offset + 2 * retry_offset * rand() / double(RAND_MAX);
      initial_dp(0, i) = Dp(0, i);
      initial_dp(1, i) = Dp(1, i);
      initial_dp(2, i) = Dp(2, i);
    }
  }

  //-----------------------adjust cost weight----------------------------
  w_collision_temp = w_collision;
  w_collision = 0.0;
}
// Run one NLopt pass over the free derivatives Dp. `step` selects the
// stage (OPT_INITIAL_TRY / OPT_FIRST_STEP / OPT_SECOND_STEP), which sets
// the NLopt time budget here and the cost weighting inside
// getCostAndGradient(). On return, Dp, Time and coeff hold the optimized
// trajectory. Always returns true.
bool GradTrajOptimizer::optimizeTrajectory(int step)
{
  if(step != 0 && step != 1 && step != 2)
  {
    // NOTE(review): only warns -- execution continues with the bad step
    cout << "step number error, step should be 0, 1 or 2" << endl;
  }
  this->step = step;

  // ---------------------create a virtual boundary, in avoidance of pushing the trajectory
  // ---------------------infinitely far----------------------------------------
  if(step == 1 && pathOutsideBoundary())
  {
    createNewVirtualBoundary();
  }

  // --------------------------initilize NLopt----------------------------------------
  int seed = ros::Time::now().toNSec() % 65536;
  nlopt::srand(seed);
  nlopt::opt opt(nlopt::algorithm(this->algorithm), 3 * num_dp); // x,y,z (3*n-3) x 3
  optimizer = opt;
  optimizer.set_min_objective(GradTrajOptimizer::costFunc, this);

  // --------------------------step specific options-----------------------------
  // each stage only differs in its wall-clock budget
  if(step == OPT_INITIAL_TRY)
  {
    optimizer.set_maxtime(try_limit);
  }
  else if(step == OPT_FIRST_STEP)
  {
    optimizer.set_maxtime(time_limit_1);
  }
  else if(step == OPT_SECOND_STEP)
  {
    optimizer.set_maxtime(time_limit_2);
  }

  // ---------------------------set upper and lower bound for dp--------------------
  // variable layout: [x block | y block | z block], num_dp entries each;
  // within a block, columns come in (pos, vel, acc) triplets per waypoint
  vector<double> lb, ub;
  lb.resize(3 * num_dp);
  ub.resize(3 * num_dp);
  for(int i = 0; i < num_dp; ++i)
  {
    if(i % 3 == 0)
    {
      // position: box of half-width bos around the corresponding waypoint
      lb[i] = path(i / 3 + 1, 0) - bos;
      lb[i + num_dp] = path(i / 3 + 1, 1) - bos;
      lb[i + num_dp * 2] = path(i / 3 + 1, 2) - bos;
      ub[i] = path(i / 3 + 1, 0) + bos;
      ub[i + num_dp] = path(i / 3 + 1, 1) + bos;
      ub[i + num_dp * 2] = path(i / 3 + 1, 2) + bos;
    }
    else if(i % 3 == 1)
    {
      // velocity bound
      lb[i] = -vos;
      lb[i + num_dp] = -vos;
      lb[i + 2 * num_dp] = -vos;
      ub[i] = vos;
      ub[i + num_dp] = vos;
      ub[i + num_dp * 2] = vos;
    }
    else
    {
      // acceleration bound
      lb[i] = -aos;
      lb[i + num_dp] = -aos;
      lb[i + 2 * num_dp] = -aos;
      ub[i] = aos;
      ub[i + num_dp] = aos;
      ub[i + num_dp * 2] = aos;
    }
  }
  optimizer.set_lower_bounds(lb);
  optimizer.set_upper_bounds(ub);

  // ---------------------------set initial value---------------------------
  std::vector<double> _dp(3 * num_dp);
  for(int i = 0; i < num_dp; ++i)
  {
    _dp[i] = Dp(0, i);
    _dp[i + num_dp] = Dp(1, i);
    _dp[i + 2 * num_dp] = Dp(2, i);
  }
  double min_f;

  // ---------------------------optimize ---------------------------
  // NOTE(review): nlopt::opt::optimize can throw (e.g. roundoff-limited);
  // exceptions currently propagate to the caller
  cout << "-------------------begin optimization-------------------" << endl;
  nlopt::result result = optimizer.optimize(_dp, min_f);

  // ---------------------------display the result---------------------------
  cout << "Optimized result is:" << result << endl;

  // ---------------------------update optimized derivative---------------------------
  Dp.setZero();
  for(int i = 0; i < num_dp; ++i)
  {
    Dp(0, i) = _dp[i];
    Dp(1, i) = _dp[i + num_dp];
    Dp(2, i) = _dp[i + 2 * num_dp];
  }

  //----------------------------reallocate segment time--------------------------------
  // same rule as the constructor: length / mean_v, floored at sgm_time,
  // with init_time added on the first and last segments.
  // Df columns 0..2 are the start state, 3..5 the end state (x shown;
  // rows are x/y/z) -- TODO confirm against TrajectoryGenerator
  for(int i = 0; i < Time.size(); ++i)
  {
    double len = 0.0;
    // head and tail segment length
    if(i == 0)
    {
      len = sqrt(pow(Df(0, 0) - Dp(0, 0), 2) + pow(Df(1, 0) - Dp(1, 0), 2) +
                 pow(Df(2, 0) - Dp(2, 0), 2));
    }
    else if(i == Time.size() - 1)
    {
      len = sqrt(pow(Df(0, 3) - Dp(0, 3 * (i - 1)), 2) + pow(Df(1, 3) - Dp(1, 3 * (i - 1)), 2) +
                 pow(Df(2, 3) - Dp(2, 3 * (i - 1)), 2));
    }
    else
    // median segment length
    {
      len = sqrt(pow(Dp(0, 3 * (i - 1)) - Dp(0, 3 * i), 2) +
                 pow(Dp(1, 3 * (i - 1)) - Dp(1, 3 * i), 2) +
                 pow(Dp(2, 3 * (i - 1)) - Dp(2, 3 * i), 2));
    }
    Time(i) = max(len / mean_v, sgm_time);
    if(i == 0 || i == Time.size() - 1)
      Time(i) += init_time;
  }

  // ---------------------------update optimized coefficient---------------------------
  getCoefficientFromDerivative(this->coeff, _dp);

  //---------------------------show optimization time of two step---------------------------
  if(step == 1 || step == 2)
  {
    cout << "total time:" << total_time << endl << "iterative num:" << iter_num << endl;
    if(step == 2)
    {
      total_time = 0;
      iter_num = 0;
    }
  }
  return true;
}
// Copy out the current polynomial coefficients: one row per segment,
// 18 columns (6 coefficients per axis).
void GradTrajOptimizer::getCoefficient(Eigen::MatrixXd &coe)
{
  coe = this->coeff;
}
// Copy out the per-segment durations.
void GradTrajOptimizer::getSegmentTime(Eigen::VectorXd &T)
{
  T = this->Time;
}
// Convert stacked free derivatives _dp (layout: [x block | y block | z
// block], num_dp entries each) into per-segment polynomial coefficients.
// Output: one row per segment, 18 columns (x in [0,6), y in [6,12),
// z in [12,18)).
void GradTrajOptimizer::getCoefficientFromDerivative(Eigen::MatrixXd &coefficient,
                                                     const std::vector<double> &_dp) const
{
  coefficient.resize(num_point - 1, 18);

  for(int i = 0; i < 3; ++i)
  {
    //-----------------------merge df and dp -> d(df,dp)-----------------------
    // CONSISTENCY FIX: use num_df instead of the hard-coded 6 (the class
    // tracks the fixed-derivative count explicitly); also drop the
    // redundant df/dp temporaries
    Eigen::VectorXd d(num_df + num_dp);
    d.segment(0, num_df) = Df.row(i).transpose();
    for(int j = 0; j < num_dp; j++)
    {
      d(num_df + j) = _dp[j + num_dp * i];
    }

    // ----------------------convert derivative to coefficient------------------
    // L maps the full derivative vector to stacked segment coefficients
    Eigen::VectorXd coe = L * d;
    for(int j = 0; j < (num_point - 1); j++)
    {
      coefficient.block(j, 6 * i, 1, 6) = coe.segment(6 * j, 6).transpose();
    }
  }
}
// Objective for NLopt: total cost and analytic gradient w.r.t. the free
// derivatives dp (layout [x | y | z], num_dp each).
//   smoothness: d' R d, closed-form gradient 2 Rfp' df + 2 Rpp dp
//   collision / velocity / acceleration: penalties integrated along the
//   trajectory as sum(penalty * |v| * dt) per sampled point
// Velocity/acceleration terms are only evaluated in step 2.
void GradTrajOptimizer::getCostAndGradient(std::vector<double> dp, double &cost,
                                           std::vector<double> &_grad) const
{
  // get total iterative number and time
  iter_num++;
  ros::Time tb1 = ros::Time::now();

  // --------------------------initialize---------------------------------
  cost = 0;
  double cost_smooth = 0;
  double cost_colli = 0;
  double cost_vel = 0;
  double cost_acc = 0;
  Eigen::MatrixXd gradient = Eigen::MatrixXd::Zero(3, num_dp);
  Eigen::MatrixXd g_smooth = Eigen::MatrixXd::Zero(3, num_dp);
  Eigen::MatrixXd g_colli = Eigen::MatrixXd::Zero(3, num_dp);
  Eigen::MatrixXd g_vel = Eigen::MatrixXd::Zero(3, num_dp);
  Eigen::MatrixXd g_acc = Eigen::MatrixXd::Zero(3, num_dp);

  // -----------get smoothness cost---------------------------------------
  //-------------merge df and dp into d(df,dp)-----------------------------
  // #pragma omp parallel sections
  {
    // #pragma omp section
    {
      Eigen::VectorXd dfx = Df.block(0, 0, 1, 6).transpose();
      Eigen::VectorXd dfy = Df.block(1, 0, 1, 6).transpose();
      Eigen::VectorXd dfz = Df.block(2, 0, 1, 6).transpose();
      Eigen::VectorXd dpx = Eigen::VectorXd::Zero(num_dp);
      Eigen::VectorXd dpy = Eigen::VectorXd::Zero(num_dp);
      Eigen::VectorXd dpz = Eigen::VectorXd::Zero(num_dp);
      for(int i = 0; i < num_dp; ++i)
      {
        dpx(i) = dp[i];
        dpy(i) = dp[i + num_dp];
        dpz(i) = dp[i + 2 * num_dp];
      }
      Eigen::VectorXd dx = Eigen::VectorXd::Zero(num_dp + num_df);
      Eigen::VectorXd dy = Eigen::VectorXd::Zero(num_dp + num_df);
      Eigen::VectorXd dz = Eigen::VectorXd::Zero(num_dp + num_df);
      dx.segment(0, 6) = dfx;
      dx.segment(6, num_dp) = dpx;
      dy.segment(0, 6) = dfy;
      dy.segment(6, num_dp) = dpy;
      dz.segment(0, 6) = dfz;
      dz.segment(6, num_dp) = dpz;

      // -------------------get smoothness cost,fs= d'Rd-----------------------
      cost_smooth = double(dx.transpose() * R * dx) + double(dy.transpose() * R * dy) +
                    (dz.transpose() * R * dz);

      //-------------------- get smoothness gradient---------------------------
      Eigen::MatrixXd gx_smooth = 2 * Rfp.transpose() * dfx + 2 * Rpp * dpx;
      Eigen::MatrixXd gy_smooth = 2 * Rfp.transpose() * dfy + 2 * Rpp * dpy;
      Eigen::MatrixXd gz_smooth = 2 * Rfp.transpose() * dfz + 2 * Rpp * dpz;
      g_smooth.row(0) = gx_smooth.transpose();
      g_smooth.row(1) = gy_smooth.transpose();
      g_smooth.row(2) = gz_smooth.transpose();
    }

    // ---------------------get polynomials coefficient, for evaluating penalty----------------
    // #pragma omp section
    {
      Eigen::MatrixXd coe;
      getCoefficientFromDerivative(coe, dp);

      // ------------------get coolision, velocity and acceleration cost and gradient by integrate
      // -------------------along the trajectory-------------------------
      Eigen::MatrixXd Ldp(6, num_dp);
      // #pragma omp parallel for
      for(int s = 0; s < Time.size(); s++)
      {
        // NOTE(review): when the collision weight is ~0 this `break` also
        // skips the velocity/acceleration terms below -- confirm intended
        if(fabs(w_collision) < 1e-4)
          break;

        // ------------------------get matrix Ldp-----------------------------------
        // rows of L for segment s, free-derivative columns only
        Ldp = L.block(6 * s, 6, 6, num_dp);

        //------------------------- discrete time step------------------------------
        // 15 samples per segment
        double dt = Time(s) / 15.0;
        for(double t = 1e-3; t < Time(s); t += dt)
        {
          // ------------------------get position,velocity------------------------
          Eigen::Vector3d pos, vel;
          getPositionFromCoeff(pos, coe, s, t);
          getVelocityFromCoeff(vel, coe, s, t);
          double vel_norm = vel.norm() + 1e-5;  // epsilon avoids 0/0 below

          // ------------------------get information from signed distance field----------
          double dist = 0, gd = 0, cd = 0;
          Eigen::Vector3d grad;
          getDistanceAndGradient(pos, dist, grad);
          getDistancePenalty(dist, cd);
          getDistancePenaltyGradient(dist, gd);
          if(gd_type == 1)
          {
            // keep the analytic penalty gradient as-is
          }
          else if(gd_type == 2)
          {
            // replace the penalty slope by a fixed magnitude
            gd = -gd_value;
          }

          //------------------------time Matrix T------------------------
          Eigen::MatrixXd T(1, 6);
          getTimeMatrix(t, T);

          // #pragma omp atomic
          // ------------------------ collision cost------------------------
          cost_colli += cd * vel_norm * dt;

          // #pragma omp critical
          // ------------------------ gradient of collision cost------------------------
          {
            for(int k = 0; k < 3; k++)
            {
              g_colli.row(k) = g_colli.row(k) + (gd * grad(k) * cd * vel_norm * T * Ldp +
                                                 cd * (vel(k) / vel_norm) * T * V * Ldp) *
                                                    dt;
            }
          }

          // ------------------------only in second step optimization ------------------------
          //------------------------get velocity and accleration cost------------------------
          if(step == 2)
          {
            double cv = 0, ca = 0, gv = 0, ga = 0;
            Eigen::Vector3d acc;
            getAccelerationFromCoeff(acc, coe, s, t);
            // #pragma omp critical
            {
              for(int k = 0; k < 3; k++)
              {
                // ------------------------get velocity cost------------------------
                getVelocityPenalty(vel(k), cv);
                cost_vel += cv * vel_norm * dt;

                //------------------------ get acceleration cost------------------------
                getAccelerationPenalty(acc(k), ca);
                cost_acc += ca * vel_norm * dt;
              }
            }
            // #pragma omp critical
            {
              // NOTE(review): cv and ca below still hold the k == 2 values
              // from the cost loop above, not per-axis values -- looks
              // unintended; confirm before changing
              for(int k = 0; k < 3; k++)
              {
                // ------------------------get velocity gradient------------------------
                getVelocityPenaltyGradient(vel(k), gv);
                g_vel.row(k) =
                    g_vel.row(k) +
                    (gv * vel_norm * T * V * Ldp + cv * (vel(k) / vel_norm) * T * V * Ldp) * dt;

                // ------------------------get acceleration gradient------------------------
                getAccelerationPenaltyGradient(acc(k), ga);
                g_acc.row(k) =
                    g_acc.row(k) +
                    (ga * vel_norm * T * V * V * Ldp + ca * (vel(k) / vel_norm) * T * V * Ldp) * dt;
              }
            }
          }
        }
      }
    }
  }

  //------------------------ sum up all cost------------------------
  // step-dependent weighting: step 1 ignores smoothness
  double ws = this->w_smooth, wc = this->w_collision, wv = 1.0, wa = 1.0;
  if(step == OPT_INITIAL_TRY)
  {
    // wc= 0.0;
  }
  else if(step == OPT_FIRST_STEP)
  {
    ws = 0.0;
    // wc= 1.0;
  }
  else if(step == OPT_SECOND_STEP)
  {
    // ws= 4.0;
    // wc= 1.0;
  }
  cost = ws * cost_smooth + wc * cost_colli + wv * cost_vel + wa * cost_acc + 1e-3;

  // ------------------------sum up all gradient and convert ------------------------
  gradient = ws * g_smooth + wc * g_colli + wv * g_vel + wa * g_acc;
  _grad.resize(num_dp * 3);
  for(int i = 0; i < num_dp; ++i)
  {
    _grad[i] = gradient(0, i) + 1e-5;
    _grad[i + num_dp] = gradient(1, i) + 1e-5;
    _grad[i + 2 * num_dp] = gradient(2, i) + 1e-5;
  }

  // ------------------------get total time------------------------
  ros::Time te1 = ros::Time::now();
  total_time += (te1.toSec() - tb1.toSec());
}
// get position from coefficient
// Evaluate the position of segment `index` at `time`: each axis is a
// quintic c0 + c1 t + ... + c5 t^5, axes stored in coefficient columns
// [0,6), [6,12), [12,18).
void GradTrajOptimizer::getPositionFromCoeff(Eigen::Vector3d &pos, const Eigen::MatrixXd &coeff,
                                             const int &index, const double &time) const
{
  for(int axis = 0; axis < 3; ++axis)
  {
    double value = 0.0;
    for(int j = 0; j < 6; ++j)
      value += coeff(index, 6 * axis + j) * pow(time, j);
    // the original narrowed through float; keep that cast
    pos(axis) = (float)value;
  }
}
// get velocity from cofficient
// Evaluate the velocity of segment `index` at `time`: first derivative of
// the quintic, sum over j >= 1 of j * c_j * t^(j-1) per axis.
void GradTrajOptimizer::getVelocityFromCoeff(Eigen::Vector3d &vel, const Eigen::MatrixXd &coeff,
                                             const int &index, const double &time) const
{
  for(int axis = 0; axis < 3; ++axis)
  {
    double value = coeff(index, 6 * axis + 1);
    for(int j = 2; j < 6; ++j)
      value += j * coeff(index, 6 * axis + j) * pow(time, j - 1);
    // the original narrowed through float; keep that cast
    vel(axis) = (float)value;
  }
}
// get acceleration from coefficient
// Evaluate the acceleration of segment `index` at `time`: second
// derivative of the quintic, sum over j >= 2 of j*(j-1)*c_j*t^(j-2).
void GradTrajOptimizer::getAccelerationFromCoeff(Eigen::Vector3d &acc, const Eigen::MatrixXd &coeff,
                                                 const int &index, const double &time) const
{
  for(int axis = 0; axis < 3; ++axis)
  {
    double value = 2 * coeff(index, 6 * axis + 2);
    for(int j = 3; j < 6; ++j)
      value += j * (j - 1) * coeff(index, 6 * axis + j) * pow(time, j - 2);
    // the original narrowed through float; keep that cast
    acc(axis) = (float)value;
  }
}
// Exponential collision barrier: grows as distance d drops below the safe
// distance d0, decaying at rate 1/r.
inline void GradTrajOptimizer::getDistancePenalty(const double &d, double &cost) const
{
  const double barrier = exp(-(d - this->d0) / this->r);
  cost = this->alpha * barrier;
}
// Derivative of the distance penalty w.r.t. d.
inline void GradTrajOptimizer::getDistancePenaltyGradient(const double &d, double &grad) const
{
  const double barrier = exp(-(d - this->d0) / this->r);
  grad = -(this->alpha / this->r) * barrier;
}
// Exponential penalty on speed beyond the threshold v0.
inline void GradTrajOptimizer::getVelocityPenalty(const double &v, double &cost) const
{
  const double excess = abs(v) - v0;
  cost = alphav * exp(excess / rv);
}
// Slope of the velocity penalty. NOTE(review): no sign(v) factor for the
// |v| derivative -- matches the original behavior exactly.
inline void GradTrajOptimizer::getVelocityPenaltyGradient(const double &v, double &grad) const
{
  const double excess = abs(v) - v0;
  grad = (alphav / rv) * exp(excess / rv);
}
// Exponential penalty on acceleration beyond the threshold a0.
inline void GradTrajOptimizer::getAccelerationPenalty(const double &a, double &cost) const
{
  const double excess = abs(a) - a0;
  cost = alphaa * exp(excess / ra);
}
// Slope of the acceleration penalty. NOTE(review): no sign(a) factor for
// the |a| derivative -- matches the original behavior exactly.
inline void GradTrajOptimizer::getAccelerationPenaltyGradient(const double &a, double &grad) const
{
  const double excess = abs(a) - a0;
  grad = (alphaa / ra) * exp(excess / ra);
}
// get distance in signed distance field ,by position query
// Query distance and gradient at `pos` from the SDF, then tighten both
// with the virtual boundary: whichever is closer (obstacle or boundary
// face) wins. The query point is clamped into a fixed range first, but
// the boundary distance uses the unclamped position.
void GradTrajOptimizer::getDistanceAndGradient(Eigen::Vector3d &pos, double &dist,
                                               Eigen::Vector3d &grad) const
{
  // get sdf directly from sdf_tools
  Eigen::Vector3d ori_pos = pos;
  // NOTE(review): hard-coded workspace bounds; presumably match the map
  // used to build the SDF -- confirm before reusing with another map
  constrains(pos(0), -9.8, 9.8);
  constrains(pos(1), -9.8, 9.8);
  constrains(pos(2), 0.2, 4.8);
  std::vector<double> location_gradient_query =
      this->sdf->GetGradient(pos(0), pos(1), pos(2), true);
  grad(0) = location_gradient_query[0];
  grad(1) = location_gradient_query[1];
  grad(2) = location_gradient_query[2];
  std::pair<float, bool> location_sdf_query = this->sdf->GetSafe(pos(0), pos(1), pos(2));
  dist = location_sdf_query.first;

  // update distance and gradient using boundary
  double dtb = getDistanceToBoundary(ori_pos(0), ori_pos(1), ori_pos(2));
  if(dtb < dist)
  {
    dist = dtb;
    recaluculateGradient(ori_pos(0), ori_pos(1), ori_pos(2), grad);
  }
}
// Smallest distance from (x,y,z) to any of the six virtual-boundary faces
// (negative when the point lies outside the box).
double GradTrajOptimizer::getDistanceToBoundary(const double &x, const double &y,
                                                const double &z) const
{
  double dtb = x - boundary(0);
  dtb = min(dtb, boundary(1) - x);
  dtb = min(dtb, y - boundary(2));
  dtb = min(dtb, boundary(3) - y);
  dtb = min(dtb, z - boundary(4));
  dtb = min(dtb, boundary(5) - z);
  return dtb;
}
// Gradient of getDistanceToBoundary at (x,y,z) by smoothed central
// differences with step r (the SDF resolution): each axis mixes the
// straight central difference (weight 10) with two laterally offset
// central differences (weight 3 each); (10+3+3) * 2r gives the 32*r
// divisor. ("recaluculate" spelling kept -- it is the declared name.)
void GradTrajOptimizer::recaluculateGradient(const double &x, const double &y, const double &z,
                                             Eigen::Vector3d &grad) const
{
  double r = this->resolution;
  grad(0) = (10 * (GDTB(x + r, y, z) - GDTB(x - r, y, z)) +
             3 * (GDTB(x + r, y + r, z) - GDTB(x - r, y + r, z)) +
             3 * (GDTB(x + r, y - r, z) - GDTB(x - r, y - r, z))) /
            (32 * r);
  grad(1) = (10 * (GDTB(x, y + r, z) - GDTB(x, y - r, z)) +
             3 * (GDTB(x + r, y + r, z) - GDTB(x + r, y - r, z)) +
             3 * (GDTB(x - r, y + r, z) - GDTB(x - r, y - r, z))) /
            (32 * r);
  grad(2) = (10 * (GDTB(x, y, z + r) - GDTB(x, y, z - r)) +
             3 * (GDTB(x, y + r, z + r) - GDTB(x, y + r, z - r)) +
             3 * (GDTB(x, y - r, z + r) - GDTB(x, y - r, z - r))) /
            (32 * r);
}
// Fill T with the order-5 power basis row [1, t, t^2, t^3, t^4, t^5].
void GradTrajOptimizer::getTimeMatrix(const double &t, Eigen::MatrixXd &T) const
{
  T = Eigen::MatrixXd::Zero(1, 6);
  for(int k = 0; k < 6; ++k)
    T(0, k) = pow(t, k);
}
/** NLopt format cost function */
double GradTrajOptimizer::costFunc(const std::vector<double> &x, std::vector<double> &grad,
void *func_data)
{
GradTrajOptimizer *gtop = reinterpret_cast<GradTrajOptimizer *>(func_data);
double cost;
gtop->getCostAndGradient(x, cost, grad);
return cost;
}
#endif |
dnn.c | //------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: run all neural networks from http://graphchallenge.org
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: test for LAGraph_dnn. Contributed by Tim Davis,
// Texas A&M University.
// Usage: ./build/dnn nproblems
// nproblems is the # of test problems to solve. If not present, it defaults
// to 12 (run all 12 DNN's). The problems are solved in order from small to
// big. The Makefile just runs the first and smallest problem.
// NOTE: this test currently uses many GxB_* extensions in
// SuiteSparse:GraphBLAS. It optionally uses OpenMP.
#include <LAGraph.h>
#define LAGRAPH_FREE_ALL ;
int main (int argc, char **argv)
{
//--------------------------------------------------------------------------
// start LAGraph and GraphBLAS
//--------------------------------------------------------------------------
GrB_Info info ;
LAGRAPH_OK (LAGraph_init ( )) ;
//--------------------------------------------------------------------------
// problem size definitions
//--------------------------------------------------------------------------
// The 12 problems and their sizes are hard-coded below.
// It would be better to define these from the input files, but the problem
// data files are not formatted in a way that makes this easy to do. A
// Matrix Market file format would be better (which can specify the type
// and size of each matrix), with the additional of a problem specification
// file that defines each of the 12 problems to solve.
// Each problem is defined by a set of files in the DNN_DATA directory,
// which can be obtained from http://graphchallenge.org . The simplest way
// to redefine the location of the data files is to make ./dnn_data a
// symbolic link, and leave DNN_DATA unchanged. The .gitignore file will
// prevent dnn_data from syncing to github, so you could also simply change
// ./dnn_data to a true directory and place all files there. Or, change
// the DNN_DATA macro to point to your data files.
#define DNN_DATA "./dnn_data"
// Each of the 12 problems is defined by the # of neurons at each layer, N
// = (1024, 4096, 16384, 65536), and the # of layers, L = (120, 480, or
// 1920). Each problem has the same number of features (F = 60000). The
// input files for a given problem (N,L) are as follows:
// Input feature vectors: an F-by-N sparse matrix
// ./dnn_data/MNIST/sparse-images-(N).tsv
// Neural network layers, for i = 1 to L, each an N-by-N sparse matrix:
// ./dnn_data/DNN/neuron(N)/n(N)-l(i).tsv
// True categories, a list of integers, one per line:
// ./dnn_data/DNN/neuron(N)-l(L)-categories.tsv
// The Bias vectors are defined with the single scalar, neuralNetBias[ ],
// with one scalar for each value of N. This scalar is used to construct
// the diagonal Bias matrices for each layer. All the layers share the
// same matrix, but they are treated as different matrices here. In a more
// general problem, the Bias matrices would differ for each layer and
// perhaps for each neuron. As a result, this test is not permitted to
// exploit the fact that all neurons are biased the same way.
// Note that for a given number of neurons, N, each of the 3 problems for
// different layers shares the same weight matrices for the first layers.
// That is, the first 120 layers of the (1024,480) problem are the same as
// the 120 layers of the (1024,120) problem. This is not exploited in
// LAGraph_dnn, but it is exploited here, simply to reduce the time to load
// the problems.
int len = 1024 ;
char filename [len] ;
#define NMAXLAYERS 3
int maxLayers [NMAXLAYERS] = { 120, 480, 1920 } ;
// #define NMAXNEURONS 1
// int Nneurons [NMAXNEURONS] = { 65536 } ;
// double neuralNetBias [NMAXNEURONS] = { -0.45 } ;
#define NMAXNEURONS 4
int Nneurons [NMAXNEURONS] = { 1024, 4096, 16384, 65536 } ;
double neuralNetBias [NMAXNEURONS] = { -0.3, -0.35, -0.4, -0.45 } ;
int nfeatures = 60000 ;
GrB_Matrix Y0 = NULL, Y = NULL, W [65536], Bias [65536] ;
GrB_Vector TrueCategories = NULL, Categories = NULL, C = NULL ;
for (int layer = 0 ; layer < 65536 ; layer++)
{
W [layer] = NULL ;
Bias [layer] = NULL ;
}
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&TrueCategories) ; \
GrB_free (&Categories) ; \
GrB_free (&C) ; \
GrB_free (&Y) ; \
GrB_free (&Y0) ; \
for (int layer = 0 ; layer < 65536 ; layer++) \
{ \
GrB_free (& (W [layer])) ; \
GrB_free (& (Bias [layer])) ; \
} \
}
// select the type. GrB_FP32 is faster and passes all the tests.
// GrB_Type type = GrB_FP64 ;
GrB_Type type = GrB_FP32 ;
printf ("type: ") ;
if (type == GrB_FP64) printf ("double\n") ; else printf ("float\n") ;
// get the max # of threads that can be used
int nthreads_max ;
LAGRAPH_OK (GxB_get (GxB_NTHREADS, &nthreads_max)) ;
printf ("max # of nthreads: %d\n", nthreads_max) ;
#define NNTHREADS 12
int nthreads_list [NNTHREADS] =
{ 1, 2, 4, 8, 16, 20, 32, 40, 64, 128, 160, 256 } ;
// #define NNTHREADS 1
// int nthreads_list [NNTHREADS] = { 40 } ;
// determine the # of problems to solve
int nproblems = NMAXNEURONS * NMAXLAYERS ;
if (argc > 1)
{
sscanf (argv [1], "%d", &nproblems) ;
}
printf ("# of problems to solve: %d\n", nproblems) ;
int problem = 0 ;
//--------------------------------------------------------------------------
// run all problems
//--------------------------------------------------------------------------
for (int kn = 0 ; kn < NMAXNEURONS ; kn++)
{
//----------------------------------------------------------------------
// check if this problem is to be solved
//----------------------------------------------------------------------
if (problem > nproblems) continue ;
//----------------------------------------------------------------------
// get the number of nneurons and neural bias
//----------------------------------------------------------------------
double tic [2] ;
LAGraph_tic (tic) ;
int nneurons = Nneurons [kn] ;
double b = neuralNetBias [kn] ;
printf ("\n# neurons: %d bias: %g\n", nneurons, b) ;
//----------------------------------------------------------------------
// read in the initial feature vectors
//----------------------------------------------------------------------
sprintf (filename, "%s/MNIST/sparse-images-%d.tsv", DNN_DATA, nneurons);
FILE *f = fopen (filename, "r") ;
if (!f) { printf ("cannot open %s\n", filename) ; abort ( ) ; }
LAGRAPH_OK (LAGraph_tsvread (&Y0, f, type, nfeatures, nneurons)) ;
fclose (f) ;
double t = LAGraph_toc (tic) ;
printf ("# features: %" PRIu64 " read time: %g sec\n", nfeatures, t) ;
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, Y0)) ;
printf ("# entries in Y0: %g million\n", (double) nvals / 1e6) ;
fflush (stdout) ;
//----------------------------------------------------------------------
// run each problem size (for all #'s of layers)
//----------------------------------------------------------------------
for (int kl = 0 ; kl < NMAXLAYERS ; kl++)
{
//------------------------------------------------------------------
// check if this problem is to be solved
//------------------------------------------------------------------
problem++ ;
if (problem > nproblems) continue ;
//------------------------------------------------------------------
// get the number of layers in this neural net
//------------------------------------------------------------------
int nlayers = maxLayers [kl] ;
printf ("\n--------------------------------------"
"neurons per layer: %d layers: %d\n", nneurons, nlayers) ;
//------------------------------------------------------------------
// read in the layers in parallel
//------------------------------------------------------------------
LAGraph_tic (tic) ;
int first_layer = (kl == 0) ? 0 : maxLayers [kl-1] ;
bool ok = true ;
// assume the I/O system can handle 2-way parallelism
#pragma omp parallel for schedule(dynamic,1) reduction(&&:ok) \
num_threads (2)
for (int layer = first_layer ; layer < nlayers ; layer++)
{
// read the neuron layer: W [layer]
char my_filename [1024] ;
sprintf (my_filename, "%s/DNN/neuron%d/n%d-l%d.tsv", DNN_DATA,
nneurons, nneurons, layer+1) ;
FILE *my_file = fopen (my_filename, "r") ;
bool my_ok = true ;
if (!my_file)
{
printf ("cannot open %s\n", my_filename) ;
my_ok = false ;
continue ;
}
GrB_Info my_info = LAGraph_tsvread (&(W [layer]), my_file,
type, nneurons, nneurons) ;
fclose (my_file) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
// construct the bias matrix: Bias [layer]. Note that all Bias
// matrices are the same for all layers, and all diagonal
// entries are also the same, but this test must not exploit
// that fact.
my_info = GrB_Matrix_new (&(Bias [layer]), type,
nneurons, nneurons) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
for (int i = 0 ; i < nneurons ; i++)
{
my_info = GrB_Matrix_setElement (Bias [layer], b, i, i) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
}
GrB_Index ignore ;
my_info = GrB_Matrix_nvals (&ignore, Bias [layer]) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
ok = ok && my_ok ;
}
if (!ok)
{
printf ("neural read failure\n") ;
abort ( ) ;
}
t = LAGraph_toc (tic) ;
printf ("read net time %g sec\n", t) ;
double nedges = 0 ;
for (int layer = 0 ; layer < nlayers ; layer++)
{
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, W [layer])) ;
nedges += nvals ;
}
printf ("# edges in all layers: %g million\n\n",
(double) nedges / 1e6) ;
fflush (stdout) ;
// read TrueCategories as a boolean nfeatures-by-1 vector
LAGRAPH_OK (GrB_Vector_new (&TrueCategories, GrB_BOOL,
nfeatures)) ;
sprintf (filename, "%s/DNN/neuron%d-l%d-categories.tsv", DNN_DATA,
nneurons, nlayers) ;
f = fopen (filename, "r") ;
bool check_result = (f != NULL) ;
if (check_result)
{
while (1)
{
int category ;
if (fscanf (f, "%d\n", &category) == EOF) break ;
LAGRAPH_OK (GrB_Vector_setElement (TrueCategories,
(bool) true, category-1)) ;
}
fclose (f) ;
}
else
{
printf ("cannot open %s\n", filename) ;
}
//------------------------------------------------------------------
// solve the problem with 1, 2, 4, ..., nthreads_max threads
//------------------------------------------------------------------
double t1 = 0, tcheck = 0 ;
GrB_Index final_ynvals ;
for (int kth = 0 ; kth < NNTHREADS ; kth++)
{
//--------------------------------------------------------------
// set the # of threads to use
//--------------------------------------------------------------
int nthreads = nthreads_list [kth] ;
if (nthreads > nthreads_max) break ;
LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads)) ;
printf ("nthreads %2d: ", nthreads) ;
fflush (stdout) ;
//--------------------------------------------------------------
// solve the problem
//--------------------------------------------------------------
LAGraph_tic (tic) ;
LAGRAPH_OK (LAGraph_dnn (&Y, W, Bias, nlayers, Y0)) ;
t = LAGraph_toc (tic) ;
printf ("soln time %12.2f sec", t) ;
if (nthreads == 1)
{
t1 = t ;
printf (" ") ;
}
else
{
printf (" speedup %8.2f", t1/t) ;
}
double rate = ((double) nfeatures) * ((double) nedges) / t ;
printf (" rate %10.4f (1e9 edges/sec) ", rate / 1e9) ;
//--------------------------------------------------------------
// check the result
//--------------------------------------------------------------
// this is so fast, it's hardly worth timing ...
LAGraph_tic (tic) ;
LAGRAPH_OK (GrB_Matrix_nvals (&final_ynvals, Y)) ;
// C = sum (Y)
LAGRAPH_OK (GrB_Vector_new (&C, type, nfeatures)) ;
LAGRAPH_OK (GrB_reduce (C, NULL, NULL, GrB_PLUS_FP64, Y, NULL));
// Categories = pattern of C
LAGRAPH_OK (GrB_Vector_new (&Categories, GrB_BOOL, nfeatures)) ;
LAGRAPH_OK (GrB_apply (Categories, NULL, NULL, GxB_ONE_BOOL,
C, NULL)) ;
// write out Categories, as a 1-based file
/*
sprintf (filename, "my_neuron%d-l%d-categories_threads%d.tsv",
nneurons, nlayers, nthreads) ;
FILE *ff = fopen (filename, "w") ;
for (int i = 0 ; i < nfeatures ; i++)
{
bool c = false ;
LAGRAPH_OK (GrB_Vector_extractElement (&c, Categories, i)) ;
if (c) fprintf (ff, "%d\n", i + 1) ;
}
fclose (ff) ;
*/
if (check_result)
{
// check if Categories and TrueCategories are the same
bool isequal ;
LAGRAPH_OK (LAGraph_Vector_isequal (&isequal,
TrueCategories, Categories, NULL)) ;
if (!isequal)
{
// GxB_print (TrueCategories, 3) ;
// GxB_print (Categories, 3) ;
printf ("test failure!\n") ;
// LAGRAPH_FREE_ALL ;
// abort ( ) ;
}
}
printf ("\n") ;
GrB_free (&Categories) ;
GrB_free (&C) ;
GrB_free (&Y) ;
tcheck = LAGraph_toc (tic) ;
}
printf ("\n# entries in final Y: %g million\n",
(double) final_ynvals / 1e6) ;
printf ("check time: %g sec\n", tcheck) ;
LAGRAPH_OK (GxB_set (GxB_NTHREADS, nthreads_max)) ;
}
//----------------------------------------------------------------------
// free the problem
//----------------------------------------------------------------------
LAGRAPH_FREE_ALL ;
}
//--------------------------------------------------------------------------
// finalize LAGraph and GraphBLAS
//--------------------------------------------------------------------------
LAGRAPH_OK (LAGraph_finalize ( )) ;
printf ("all tests passed\n") ;
return (GrB_SUCCESS) ;
}
|
Friends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FRIENDS_H_
#define _FRIENDS_H_
#include <iostream>
#include "SpMat.h" // Best to include the base class first
#include "SpHelper.h"
#include "StackEntry.h"
#include "Isect.h"
#include "Deleter.h"
#include "SpImpl.h"
#include "SpParHelper.h"
#include "Compare.h"
#include "CombBLAS.h"
#include "PreAllocatedSPA.h"
namespace combblas {
template <class IU, class NU>
class SpTuples;
template <class IU, class NU>
class SpDCCols;
template <class IU, class NU>
class Dcsc;
/*************************************************************************************************/
/**************************** SHARED ADDRESS SPACE FRIEND FUNCTIONS ******************************/
/****************************** MULTITHREADED LOGIC ALSO GOES HERE *******************************/
/*************************************************************************************************/
//! SpMV with a dense input vector.
//! For every stored entry A(r,c), folds A(r,c)*x[c] into y[r] via SR::axpy;
//! rows of y with no stored entries are left untouched.
template <typename SR, typename IU, typename NU, typename RHS, typename LHS>
void dcsc_gespmv (const SpDCCols<IU, NU> & A, const RHS * x, LHS * y)
{
    if (A.nnz <= 0)
        return;     // empty matrix: nothing to accumulate
    for (IU k = 0; k < A.dcsc->nzc; ++k)        // over stored (nonzero) columns
    {
        const IU col  = A.dcsc->jc[k];
        const IU cend = A.dcsc->cp[k+1];
        for (IU p = A.dcsc->cp[k]; p < cend; ++p)
        {
            SR::axpy(A.dcsc->numx[p], x[col], y[A.dcsc->ir[p]]);
        }
    }
}
//! SpMV with dense vector (multithreaded version, for an unsplit matrix).
//! Each thread accumulates into its own private dense buffer of length
//! getnrow() (initialized to the semiring identity), and the buffers are
//! then reduced into y with SR::add.
template <typename SR, typename IU, typename NU, typename RHS, typename LHS>
void dcsc_gespmv_threaded_nosplit (const SpDCCols<IU, NU> & A, const RHS * x, LHS * y)
{
    if(A.nnz > 0)
    {
        int nthreads = 1;
#ifdef _OPENMP
        #pragma omp parallel
        {
            nthreads = omp_get_num_threads();
        }
#endif
        IU nlocrows = A.getnrow();
        LHS ** tomerge = SpHelper::allocate2D<LHS>(nthreads, nlocrows);
        auto id = SR::id();
        for(int i=0; i<nthreads; ++i)
        {
            std::fill_n(tomerge[i], nlocrows, id);  // start at the semiring identity
        }
        #pragma omp parallel for
        for(IU j = 0; j < A.dcsc->nzc; ++j)     // for all nonzero columns
        {
            // BUG FIX: this was "int curthread = 1;", which indexes one past
            // the end of tomerge[] (allocated with nthreads == 1) whenever
            // the code is compiled without OpenMP. Thread 0 is the correct
            // serial fallback.
            int curthread = 0;
#ifdef _OPENMP
            curthread = omp_get_thread_num();
#endif
            LHS * loc2merge = tomerge[curthread];
            IU colid = A.dcsc->jc[j];
            for(IU i = A.dcsc->cp[j]; i < A.dcsc->cp[j+1]; ++i)
            {
                IU rowid = A.dcsc->ir[i];
                SR::axpy(A.dcsc->numx[i], x[colid], loc2merge[rowid]);
            }
        }
        // reduce the per-thread buffers into the caller's y
        #pragma omp parallel for
        for(IU j = 0; j < nlocrows; ++j)
        {
            for(int i = 0; i < nthreads; ++i)
            {
                y[j] = SR::add(y[j], tomerge[i][j]);
            }
        }
        SpHelper::deallocate2D(tomerge, nthreads);
    }
}
/**
* Multithreaded SpMV with dense vector
*/
template <typename SR, typename IU, typename NU, typename RHS, typename LHS>
void dcsc_gespmv_threaded (const SpDCCols<IU, NU> & A, const RHS * x, LHS * y)
{
if(A.nnz > 0)
{
int splits = A.getnsplit();
if(splits > 0)
{
IU nlocrows = A.getnrow();
IU perpiece = nlocrows / splits;
std::vector<int> disp(splits, 0);
for(int i=1; i<splits; ++i)
disp[i] = disp[i-1] + perpiece;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int s=0; s<splits; ++s)
{
Dcsc<IU, NU> * dcsc = A.GetInternal(s);
for(IU j =0; j<dcsc->nzc; ++j) // for all nonzero columns
{
IU colid = dcsc->jc[j];
for(IU i = dcsc->cp[j]; i< dcsc->cp[j+1]; ++i)
{
IU rowid = dcsc->ir[i] + disp[s];
SR::axpy(dcsc->numx[i], x[colid], y[rowid]);
}
}
}
}
else
{
dcsc_gespmv_threaded_nosplit<SR>(A,x,y);
}
}
}
/**
 * Multithreaded SpMV with a sparse input vector.
 * Assembles the outgoing buffers sendindbuf/sendnumbuf here, ordered by
 * recipient processor (p_c of them); sdispls[] receives the starting offset
 * of each recipient's slice. Returns the total number of output nonzeros.
 * Ownership of sendindbuf, sendnumbuf and sdispls transfers to the caller
 * (delete[] when done).
 */
template <typename SR, typename IU, typename NUM, typename DER, typename IVT, typename OVT>
int generic_gespmv_threaded (const SpMat<IU,NUM,DER> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
    int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int p_c, PreAllocatedSPA<OVT> & SPA)
{
    // FACTS: Split boundaries (for multithreaded execution) are independent of recipient boundaries
    // Two splits might create output to the same recipient (needs to be merged)
    // However, each split's output is distinct (no duplicate elimination is needed after merge)
    sdispls = new int[p_c]();   // initialize to zero (as all indy might be empty)
    if(A.getnnz() > 0 && nnzx > 0)
    {
        int splits = A.getnsplit();
        if(splits > 0)
        {
            int32_t nlocrows = static_cast<int32_t>(A.getnrow());
            int32_t perpiece = nlocrows / splits;
            std::vector< std::vector< int32_t > > indy(splits);
            std::vector< std::vector< OVT > > numy(splits);
            // compute each split's contribution independently (optionally using the preallocated SPA)
#ifdef _OPENMP
            #pragma omp parallel for // num_threads(6)
#endif
            for(int i=0; i<splits; ++i)
            {
                if(SPA.initialized)
                {
                    if(i != splits-1)
                        SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece, SPA.V_localy[i], SPA.V_isthere[i], SPA.V_inds[i]);
                    else    // last split absorbs the remainder rows
                        SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece, SPA.V_localy[i], SPA.V_isthere[i], SPA.V_inds[i]);
                }
                else
                {
                    if(i != splits-1)
                        SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
                    else
                        SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
                }
            }
            // prefix-sum of per-split output sizes: accum[i] is split i's start offset
            std::vector<int> accum(splits+1, 0);
            for(int i=0; i<splits; ++i)
                accum[i+1] = accum[i] + indy[i].size();
            sendindbuf = new int32_t[accum[splits]];
            sendnumbuf = new OVT[accum[splits]];
            int32_t perproc = nlocrows / p_c;
            int32_t last_rec = p_c-1;
            // keep recipients of last entries in each split (-1 for an empty split)
            // so that we can delete indy[] and numy[] contents as soon as they are processed
            std::vector<int32_t> end_recs(splits);
            for(int i=0; i<splits; ++i)
            {
                if(indy[i].empty())
                    end_recs[i] = -1;
                else
                    end_recs[i] = std::min(indy[i].back() / perproc, last_rec);
            }
#ifdef _OPENMP
            #pragma omp parallel for // num_threads(6)
#endif
            for(int i=0; i<splits; ++i)
            {
                if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
                {
                    // FACT: Data is sorted, so if the recipient of begin is the same as the owner of end,
                    // then the whole data is sent to the same processor
                    int32_t beg_rec = std::min( indy[i].front() / perproc, last_rec);
                    // We have to test the previous "split", to see if we are marking a "recipient head"
                    // set displacement markers for the completed (previous) buffers only
                    if(i != 0)
                    {
                        int k = i-1;
                        while (k >= 0 && end_recs[k] == -1) k--; // loop backwards until seeing an non-empty split
                        if(k >= 0) // we found a non-empty split
                        {
                            std::fill(sdispls+end_recs[k]+1, sdispls+beg_rec+1, accum[i]); // last entry to be set is sdispls[beg_rec]
                        }
                        // else fill sdispls[1...beg_rec] with zero (already done)
                    }
                    // else set sdispls[0] to zero (already done)
                    if(beg_rec == end_recs[i]) // fast case: whole split goes to one recipient
                    {
                        // FIX: std::bind2nd/std::minus adaptor was removed in C++17;
                        // an equivalent capture-by-value lambda does the same subtraction.
                        const int32_t offset = perproc * beg_rec;
                        std::transform(indy[i].begin(), indy[i].end(), indy[i].begin(),
                                       [offset](int32_t v) { return v - offset; });
                        std::copy(indy[i].begin(), indy[i].end(), sendindbuf+accum[i]);
                        std::copy(numy[i].begin(), numy[i].end(), sendnumbuf+accum[i]);
                    }
                    else // slow case
                    {
                        // FACT: No matter how many splits or threads, there will be only one "recipient head"
                        // Therefore there are no race conditions for marking send displacements (sdispls)
                        int end = indy[i].size();
                        for(int cur=0; cur< end; ++cur)
                        {
                            int32_t cur_rec = std::min( indy[i][cur] / perproc, last_rec);
                            while(beg_rec != cur_rec)
                            {
                                sdispls[++beg_rec] = accum[i] + cur; // first entry to be set is sdispls[beg_rec+1]
                            }
                            sendindbuf[ accum[i] + cur ] = indy[i][cur] - perproc*beg_rec; // convert to receiver's local index
                            sendnumbuf[ accum[i] + cur ] = numy[i][cur];
                        }
                    }
                    // release this split's scratch memory eagerly
                    std::vector<int32_t>().swap(indy[i]);
                    std::vector<OVT>().swap(numy[i]);
                    bool lastnonzero = true; // am I the last nonzero split?
                    for(int k=i+1; k < splits; ++k)
                    {
                        if(end_recs[k] != -1)
                            lastnonzero = false;
                    }
                    if(lastnonzero)
                        std::fill(sdispls+end_recs[i]+1, sdispls+p_c, accum[i+1]);
                } // end_if(!indy[i].empty)
            } // end parallel for
            return accum[splits];
        }
        else
        {
            std::cout << "Something is wrong, splits should be nonzero for multithreaded execution" << std::endl;
            return 0;
        }
    }
    else
    {
        // empty matrix or empty input vector: no output at all
        sendindbuf = NULL;
        sendnumbuf = NULL;
        return 0;
    }
}
/**
 * Multithreaded SpMV with sparse vector and preset buffers.
 * The assembly of outgoing buffers sendindbuf/sendnumbuf is done here, using
 * the caller-provided displacements dspls[] (one per recipient); cnts[] is
 * incremented with the number of entries produced per recipient.
 * IVT: input vector numerical type
 * OVT: output vector numerical type
 */
template <typename SR, typename IU, typename NUM, typename DER, typename IVT, typename OVT>
void generic_gespmv_threaded_setbuffers (const SpMat<IU,NUM,DER> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
    int32_t * sendindbuf, OVT * sendnumbuf, int * cnts, int * dspls, int p_c)
{
    if(A.getnnz() > 0 && nnzx > 0)
    {
        int splits = A.getnsplit();
        if(splits > 0)
        {
            std::vector< std::vector<int32_t> > indy(splits);
            std::vector< std::vector< OVT > > numy(splits);
            int32_t nlocrows = static_cast<int32_t>(A.getnrow());
            int32_t perpiece = nlocrows / splits;
            // compute each split's contribution independently
#ifdef _OPENMP
            #pragma omp parallel for
#endif
            for(int i=0; i<splits; ++i)
            {
                if(i != splits-1)
                    SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
                else    // last split absorbs the remainder rows
                    SpMXSpV_ForThreading<SR>(*(A.GetInternal(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
            }
            int32_t perproc = nlocrows / p_c;
            int32_t last_rec = p_c-1;
            // keep recipients of last entries in each split (-1 for an empty split)
            // so that we can delete indy[] and numy[] contents as soon as they are processed
            std::vector<int32_t> end_recs(splits);
            for(int i=0; i<splits; ++i)
            {
                if(indy[i].empty())
                    end_recs[i] = -1;
                else
                    end_recs[i] = std::min(indy[i].back() / perproc, last_rec);
            }
            // loc_rec_cnts[i][r]: number of entries split i sends to recipient r
            int ** loc_rec_cnts = new int *[splits];
#ifdef _OPENMP
            #pragma omp parallel for
#endif
            for(int i=0; i<splits; ++i)
            {
                loc_rec_cnts[i] = new int[p_c](); // thread-local recipient data
                if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
                {
                    int32_t cur_rec = std::min( indy[i].front() / perproc, last_rec);
                    int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
                    for(typename std::vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
                    {
                        if( ( (*it) >= lastdata ) && cur_rec != last_rec )
                        {
                            cur_rec = std::min( (*it) / perproc, last_rec);
                            lastdata = (cur_rec+1) * perproc;
                        }
                        ++loc_rec_cnts[i][cur_rec];
                    }
                }
            }
#ifdef _OPENMP
            #pragma omp parallel for
#endif
            for(int i=0; i<splits; ++i)
            {
                if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
                {
                    // FACT: Data is sorted, so if the recipient of begin is the same as the owner of end,
                    // then the whole data is sent to the same processor
                    int32_t beg_rec = std::min( indy[i].front() / perproc, last_rec);
                    int32_t alreadysent = 0; // already sent per recipient
                    for(int before = i-1; before >= 0; before--)
                        alreadysent += loc_rec_cnts[before][beg_rec];
                    if(beg_rec == end_recs[i]) // fast case: whole split goes to one recipient
                    {
                        // FIX: std::bind2nd/std::minus adaptor was removed in C++17;
                        // an equivalent capture-by-value lambda does the same subtraction.
                        const int32_t offset = perproc * beg_rec;
                        std::transform(indy[i].begin(), indy[i].end(), indy[i].begin(),
                                       [offset](int32_t v) { return v - offset; });
                        std::copy(indy[i].begin(), indy[i].end(), sendindbuf + dspls[beg_rec] + alreadysent);
                        std::copy(numy[i].begin(), numy[i].end(), sendnumbuf + dspls[beg_rec] + alreadysent);
                    }
                    else // slow case
                    {
                        int32_t cur_rec = beg_rec;
                        int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
                        for(typename std::vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
                        {
                            if( ( (*it) >= lastdata ) && cur_rec != last_rec )
                            {
                                cur_rec = std::min( (*it) / perproc, last_rec);
                                lastdata = (cur_rec+1) * perproc;
                                // if this split switches to a new recipient after sending some data
                                // then it's sure that no data has been sent to that recipient yet
                                alreadysent = 0;
                            }
                            sendindbuf[ dspls[cur_rec] + alreadysent ] = (*it) - perproc*cur_rec; // convert to receiver's local index
                            sendnumbuf[ dspls[cur_rec] + (alreadysent++) ] = *(numy[i].begin() + (it-indy[i].begin()));
                        }
                    }
                }
            }
            // Deallocate rec counts serially once all threads complete
            for(int i=0; i< splits; ++i)
            {
                for(int j=0; j< p_c; ++j)
                    cnts[j] += loc_rec_cnts[i][j];
                delete [] loc_rec_cnts[i];
            }
            delete [] loc_rec_cnts;
        }
        else
        {
            std::cout << "Something is wrong, splits should be nonzero for multithreaded execution" << std::endl;
        }
    }
}
//! SpMV with sparse vector
//! MIND: Matrix index type
//! VIND: Vector index type (optimized: int32_t, general: int64_t)
//! Appends the result's indices/values to indy/numy via SpMXSpV.
template <typename SR, typename MIND, typename VIND, typename DER, typename NUM, typename IVT, typename OVT>
void generic_gespmv (const SpMat<MIND,NUM,DER> & A, const VIND * indx, const IVT * numx, VIND nnzx, std::vector<VIND> & indy, std::vector<OVT> & numy, PreAllocatedSPA<OVT> & SPA)
{
    // an empty matrix or an empty input vector yields an empty result
    if (A.getnnz() <= 0 || nnzx <= 0)
        return;
    if (A.getnsplit() > 0)
    {
        // split matrices are handled by the threaded driver, not here
        std::cout << "Call dcsc_gespmv_threaded instead" << std::endl;
        return;
    }
    SpMXSpV<SR>(*(A.GetInternal()), (VIND) A.getnrow(), indx, numx, nnzx, indy, numy, SPA);
}
/** SpMV with sparse vector, writing into preset buffers.
 * @param[in] indexisvalue is only used for BFS-like computations, if true then we can call the optimized version that skips SPA
 */
template <typename SR, typename IU, typename DER, typename NUM, typename IVT, typename OVT>
void generic_gespmv (const SpMat<IU,NUM,DER> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
    int32_t * indy, OVT * numy, int * cnts, int * dspls, int p_c, bool indexisvalue)
{
    // nothing to do for an empty matrix or an empty input vector
    if (A.getnnz() <= 0 || nnzx <= 0)
        return;
    if (A.getnsplit() > 0)
    {
        // split matrices are handled by the threaded driver, not here
        SpParHelper::Print("Call dcsc_gespmv_threaded instead\n");
        return;
    }
    SpMXSpV<SR>(*(A.GetInternal()), (int32_t) A.getnrow(), indx, numx, nnzx, indy, numy, cnts, dspls, p_c);
}
// Split a boolean matrix row-wise into A.splits pieces for multithreaded use.
// Destroys A.dcsc and rebuilds the data as A.dcscarr[0..numsplits-1], where
// piece i holds rows [i*perpiece, (i+1)*perpiece) (last piece absorbs the
// remainder), with row indices rebased to be local to each piece.
template<typename IU>
void BooleanRowSplit(SpDCCols<IU, bool> & A, int numsplits)
{
// refuse to split when there are fewer rows than requested pieces
if(A.m < numsplits)
{
std::cerr<< "Warning: Matrix is too small to be splitted for multithreading" << std::endl;
return;
}
A.splits = numsplits;
IU perpiece = A.m / A.splits;
std::vector<IU> prevcolids(A.splits, -1); // previous column id's are set to -1
std::vector<IU> nzcs(A.splits, 0);        // nonzero-column count per piece
std::vector<IU> nnzs(A.splits, 0);        // nonzero count per piece
// (column, piece-local row) pairs collected for each piece
std::vector < std::vector < std::pair<IU,IU> > > colrowpairs(A.splits);
if(A.nnz > 0 && A.dcsc != NULL)
{
// pass 1: bucket every stored entry by its owning piece, counting
// nonzeros and distinct columns per piece as we go
for(IU i=0; i< A.dcsc->nzc; ++i)
{
for(IU j = A.dcsc->cp[i]; j< A.dcsc->cp[i+1]; ++j)
{
IU colid = A.dcsc->jc[i];
IU rowid = A.dcsc->ir[j];
// last piece owns all remainder rows
IU owner = std::min(rowid / perpiece, static_cast<IU>(A.splits-1));
colrowpairs[owner].push_back(std::make_pair(colid, rowid - owner*perpiece));
// columns arrive in nondecreasing order, so a change of colid
// marks a new nonzero column for this piece
if(prevcolids[owner] != colid)
{
prevcolids[owner] = colid;
++nzcs[owner];
}
++nnzs[owner];
}
}
}
delete A.dcsc; // reclaim the unsplit representation's memory
//copy(nzcs.begin(), nzcs.end(), ostream_iterator<IU>(cout," " )); cout << endl;
//copy(nnzs.begin(), nnzs.end(), ostream_iterator<IU>(cout," " )); cout << endl;
A.dcscarr = new Dcsc<IU,bool>*[A.splits];
// pass 2: build each piece's Dcsc from its sorted (col,row) pairs.
// To be parallelized with OpenMP
for(int i=0; i< A.splits; ++i)
{
sort(colrowpairs[i].begin(), colrowpairs[i].end()); // sort w.r.t. columns
if(nzcs[i]>0)
{
A.dcscarr[i] = new Dcsc<IU,bool>(nnzs[i],nzcs[i]);
// all values are boolean true
std::fill(A.dcscarr[i]->numx, A.dcscarr[i]->numx+nnzs[i], static_cast<bool>(1));
IU curnzc = 0; // number of nonzero columns constructed so far
IU cindex = colrowpairs[i][0].first;
IU rindex = colrowpairs[i][0].second;
A.dcscarr[i]->ir[0] = rindex;
A.dcscarr[i]->jc[curnzc] = cindex;
A.dcscarr[i]->cp[curnzc++] = 0;
// remaining entries: start a new column whenever cindex changes
for(IU j=1; j<nnzs[i]; ++j)
{
cindex = colrowpairs[i][j].first;
rindex = colrowpairs[i][j].second;
A.dcscarr[i]->ir[j] = rindex;
if(cindex != A.dcscarr[i]->jc[curnzc-1])
{
A.dcscarr[i]->jc[curnzc] = cindex;
A.dcscarr[i]->cp[curnzc++] = j;
}
}
A.dcscarr[i]->cp[curnzc] = nnzs[i]; // sentinel: one past the last entry
}
else
{
// piece received no entries: give it an empty Dcsc
A.dcscarr[i] = new Dcsc<IU,bool>();
}
}
}
/**
 * SpTuples(A*B') (Using OuterProduct Algorithm)
 * Returns the tuples for efficient merging later
 * Supports mixed precision multiplication
 * The multiplication is on the specified semiring (passed as template parameter SR)
 * @param clearA if true, A is deleted before returning (ownership taken)
 * @param clearB if true, B is deleted before returning (ownership taken)
 * @return newly allocated SpTuples; caller owns and must delete it
 */
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AnXBt
(const SpDCCols<IU, NU1> & A,
const SpDCCols<IU, NU2> & B,
bool clearA = false, bool clearB = false)
{
IU mdim = A.m;
IU ndim = B.m; // B is already transposed
// empty operand: honor clearA/clearB, then return an empty result
if(A.isZero() || B.isZero())
{
if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
return new SpTuples< IU, NUO >(0, mdim, ndim); // just return an empty matrix
}
// intersect the nonzero column structure of A with that of B
// (SpIntersect allocates isect1/isect2/cols/rows; freed via DeleteAll below)
Isect<IU> *isect1, *isect2, *itr1, *itr2, *cols, *rows;
SpHelper::SpIntersect(*(A.dcsc), *(B.dcsc), cols, rows, isect1, isect2, itr1, itr2);
IU kisect = static_cast<IU>(itr1-isect1); // size of the intersection ((itr1-isect1) == (itr2-isect2))
// no overlapping columns: product is empty
if(kisect == 0)
{
if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
DeleteAll(isect1, isect2, cols, rows);
return new SpTuples< IU, NUO >(0, mdim, ndim);
}
// outer-product expansion over the intersection; results land on multstack,
// whose ownership passes to the returned SpTuples
StackEntry< NUO, std::pair<IU,IU> > * multstack;
IU cnz = SpHelper::SpCartesian< SR > (*(A.dcsc), *(B.dcsc), kisect, isect1, isect2, multstack);
DeleteAll(isect1, isect2, cols, rows);
if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
return new SpTuples<IU, NUO> (cnz, mdim, ndim, multstack);
}
/**
 * SpTuples(A*B) (Using ColByCol Algorithm)
 * Returns the tuples for efficient merging later
 * Supports mixed precision multiplication
 * The multiplication is on the specified semiring (passed as template parameter SR)
 * @param clearA if true, A is deleted before returning (ownership taken)
 * @param clearB if true, B is deleted before returning (ownership taken)
 * @return newly allocated SpTuples; caller owns and must delete it
 */
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AnXBn
(const SpDCCols<IU, NU1> & A,
const SpDCCols<IU, NU2> & B,
bool clearA = false, bool clearB = false)
{
    IU mdim = A.m;
    IU ndim = B.n;
    if(A.isZero() || B.isZero())
    {
        // BUG FIX: honor clearA/clearB in the empty case, as Tuples_AnXBt
        // does; otherwise the inputs leak when the caller asked for them
        // to be freed.
        if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
        if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
        return new SpTuples<IU, NUO>(0, mdim, ndim);
    }
    // column-by-column multiply; multstack ownership passes to the result
    StackEntry< NUO, std::pair<IU,IU> > * multstack;
    IU cnz = SpHelper::SpColByCol< SR > (*(A.dcsc), *(B.dcsc), A.n, multstack);
    if(clearA)
        delete const_cast<SpDCCols<IU, NU1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IU, NU2> *>(&B);
    return new SpTuples<IU, NUO> (cnz, mdim, ndim, multstack);
}
// SpTuples(A'*B'): not implemented. Warns and returns an empty result of the
// correct dimensions (A.n by B.m); caller owns and must delete it.
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AtXBt
(const SpDCCols<IU, NU1> & A,
const SpDCCols<IU, NU2> & B,
bool clearA = false, bool clearB = false)
{
    const IU rows = A.n;    // A transposed: its columns become rows
    const IU cols = B.m;    // B transposed: its rows become columns
    std::cout << "Tuples_AtXBt function has not been implemented yet !" << std::endl;
    return new SpTuples<IU, NUO> (0, rows, cols);
}
// SpTuples(A'*B): not implemented. Warns and returns an empty result of the
// correct dimensions (A.n by B.n); caller owns and must delete it.
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AtXBn
(const SpDCCols<IU, NU1> & A,
const SpDCCols<IU, NU2> & B,
bool clearA = false, bool clearB = false)
{
    const IU rows = A.n;    // A transposed: its columns become rows
    const IU cols = B.n;
    std::cout << "Tuples_AtXBn function has not been implemented yet !" << std::endl;
    return new SpTuples<IU, NUO> (0, rows, cols);
}
// Performs a balanced k-way merge of the array of SpTuples.
// Assumes every input is already column-lexicographically sorted; duplicate
// (row,col) entries across inputs are combined with SR::add.
// mstar/nstar are only used when ArrSpTups is empty; otherwise the dimensions
// are taken from the first input (all inputs must match, or an empty 0x0
// result is returned with an error message).
// If delarrs is true, the input SpTuples objects are deleted before return.
template<class SR, class IU, class NU>
SpTuples<IU,NU> MergeAll( const std::vector<SpTuples<IU,NU> *> & ArrSpTups, IU mstar = 0, IU nstar = 0, bool delarrs = false )
{
    int hsize = ArrSpTups.size();
    if(hsize == 0)
    {
        return SpTuples<IU,NU>(0, mstar,nstar);
    }
    else
    {
        mstar = ArrSpTups[0]->m;
        nstar = ArrSpTups[0]->n;
    }
    for(int i=1; i< hsize; ++i)
    {
        if((mstar != ArrSpTups[i]->m) || nstar != ArrSpTups[i]->n)
        {
            std::cerr << "Dimensions do not match on MergeAll()" << std::endl;
            return SpTuples<IU,NU>(0,0,0);
        }
    }
    if(hsize > 1)
    {
        ColLexiCompare<IU,int> heapcomp;
        // FIX: std::not2 is deprecated in C++17 and removed in C++20; an
        // explicit negating lambda gives the identical min-heap ordering.
        auto revcomp = [&heapcomp](const std::tuple<IU, IU, int> & lhs,
                                   const std::tuple<IU, IU, int> & rhs)
                       { return !heapcomp(lhs, rhs); };
        std::tuple<IU, IU, int> * heap = new std::tuple<IU, IU, int> [hsize]; // (rowindex, colindex, source-id)
        IU * curptr = new IU[hsize];    // per-source cursor into its tuples
        std::fill_n(curptr, hsize, static_cast<IU>(0));
        IU estnnz = 0;                  // upper bound on output size (no cancellation assumed)
        for(int i=0; i< hsize; ++i)
        {
            estnnz += ArrSpTups[i]->getnnz();
            heap[i] = std::make_tuple(std::get<0>(ArrSpTups[i]->tuples[0]), std::get<1>(ArrSpTups[i]->tuples[0]), i);
        }
        std::make_heap(heap, heap+hsize, revcomp);
        std::tuple<IU, IU, NU> * ntuples = new std::tuple<IU,IU,NU>[estnnz];
        IU cnz = 0;
        while(hsize > 0)
        {
            std::pop_heap(heap, heap + hsize, revcomp); // result is stored in heap[hsize-1]
            int source = std::get<2>(heap[hsize-1]);
            if( (cnz != 0) &&
                ((std::get<0>(ntuples[cnz-1]) == std::get<0>(heap[hsize-1])) && (std::get<1>(ntuples[cnz-1]) == std::get<1>(heap[hsize-1]))) )
            {
                // same (row,col) as the previous output entry: combine values
                std::get<2>(ntuples[cnz-1]) = SR::add(std::get<2>(ntuples[cnz-1]), ArrSpTups[source]->numvalue(curptr[source]++));
            }
            else
            {
                ntuples[cnz++] = ArrSpTups[source]->tuples[curptr[source]++];
            }
            if(curptr[source] != ArrSpTups[source]->getnnz()) // That array has not been depleted
            {
                heap[hsize-1] = std::make_tuple(std::get<0>(ArrSpTups[source]->tuples[curptr[source]]),
                    std::get<1>(ArrSpTups[source]->tuples[curptr[source]]), source);
                std::push_heap(heap, heap+hsize, revcomp);
            }
            else
            {
                --hsize;
            }
        }
        SpHelper::ShrinkArray(ntuples, cnz);    // trim to the actual output size
        DeleteAll(heap, curptr);
        if(delarrs)
        {
            for(size_t i=0; i<ArrSpTups.size(); ++i)
                delete ArrSpTups[i];
        }
        return SpTuples<IU,NU> (cnz, mstar, nstar, ntuples);
    }
    else
    {
        // single input: copy it out (no merging needed)
        SpTuples<IU,NU> ret = *ArrSpTups[0];
        if(delarrs)
            delete ArrSpTups[0];
        return ret;
    }
}
/**
 * operation is A = A .* not(B)
 * Returns a new Dcsc containing the entries of A whose (row,col) position is
 * NOT stored in B. Assumes both inputs' columns (jc) and rows (ir) are sorted.
 * B may be NULL, in which case the result is a copy of A.
 **/
template <typename IU, typename NU1, typename NU2>
Dcsc<IU, typename promote_trait<NU1,NU2>::T_promote> SetDifference(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B)
{
typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
IU estnzc, estnz;
// the result can be no larger than A itself
estnzc = A.nzc;
estnz = A.nz;
Dcsc<IU,N_promote> temp(estnz, estnzc);
IU curnzc = 0;  // number of output columns written so far
IU curnz = 0;   // number of output nonzeros written so far
IU i = 0;       // column cursor into A
IU j = 0;       // column cursor into B
temp.cp[0] = 0;
// merge the two sorted column lists
while(i< A.nzc && B != NULL && j< B->nzc)
{
if(A.jc[i] > B->jc[j]) ++j;     // column only in B: irrelevant
else if(A.jc[i] < B->jc[j])
{
// column only in A: copy it wholesale
temp.jc[curnzc++] = A.jc[i++];
for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = A.numx[k];
}
temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
}
else
{
// column in both: keep only rows of A that B does not have
IU ii = A.cp[i];
IU jj = B->cp[j];
IU prevnz = curnz;
while (ii < A.cp[i+1] && jj < B->cp[j+1])
{
if (A.ir[ii] > B->ir[jj]) ++jj;
else if (A.ir[ii] < B->ir[jj])
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = A.numx[ii++];
}
else // eliminate those existing nonzeros
{
++ii;
++jj;
}
}
// rows of A past the end of B's column all survive
while (ii < A.cp[i+1])
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = A.numx[ii++];
}
if(prevnz < curnz) // at least one nonzero exists in this column
{
temp.jc[curnzc++] = A.jc[i];
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
++i;
++j;
}
}
// columns of A beyond the last column of B (or all of A if B is NULL)
while(i< A.nzc)
{
temp.jc[curnzc++] = A.jc[i++];
for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = A.numx[k];
}
temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
}
temp.Resize(curnzc, curnz);     // shrink to the actual output size
return temp;
}
/**
 * @param[in] exclude if false,
 * \n then operation is A = A .* B
 * \n else operation is A = A .* not(B)
 *
 * Aydin (June 2021): exclude=true case of this function now calls SetDifference above, to remove code duplication
 *
 * Assumes both inputs' columns (jc) and rows (ir) are sorted. Entries present
 * in both A and B are multiplied elementwise; the result type is the
 * promotion of the two value types.
 **/
template <typename IU, typename NU1, typename NU2>
Dcsc<IU, typename promote_trait<NU1,NU2>::T_promote> EWiseMult(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B, bool exclude)
{
typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
IU estnzc, estnz;
if(exclude)
{
return combblas::SetDifference(A, B); // call set difference for this version
}
else // A = A .* B
{
// the intersection can be no larger than the smaller operand
estnzc = std::min(A.nzc, B->nzc);
estnz = std::min(A.nz, B->nz);
Dcsc<IU,N_promote> temp(estnz, estnzc);
IU curnzc = 0;  // number of output columns written so far
IU curnz = 0;   // number of output nonzeros written so far
IU i = 0;       // column cursor into A
IU j = 0;       // column cursor into B
temp.cp[0] = 0;
// advance through the two sorted column lists in lock-step
while(i< A.nzc && B != NULL && j<B->nzc)
{
if(A.jc[i] > B->jc[j]) ++j;
else if(A.jc[i] < B->jc[j]) ++i;
else
{
// common column: intersect the two sorted row lists
IU ii = A.cp[i];
IU jj = B->cp[j];
IU prevnz = curnz;
while (ii < A.cp[i+1] && jj < B->cp[j+1])
{
if (A.ir[ii] < B->ir[jj]) ++ii;
else if (A.ir[ii] > B->ir[jj]) ++jj;
else
{
// matching (row,col): multiply the values
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = A.numx[ii++] * B->numx[jj++];
}
}
if(prevnz < curnz) // at least one nonzero exists in this column
{
temp.jc[curnzc++] = A.jc[i];
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
++i;
++j;
}
}
temp.Resize(curnzc, curnz);     // shrink to the actual output size
return temp;
}
}
/**
 * Element-wise apply of a binary operator over two Dcsc matrices.
 * If notB is false the result pattern is the intersection of A and B:
 *   C(i,j) = __binary_op(A(i,j), B(i,j)) where both are nonzero.
 * If notB is true the result pattern is A minus B's pattern:
 *   C(i,j) = __binary_op(A(i,j), defaultBVal) where A is nonzero and B is not.
 * @param B pointer operand; the loops tolerate B == NULL, but note that the
 *          notB == false size estimate dereferences B first (callers pass
 *          NULL only with notB == true -- see the SpDCCols wrapper below).
 */
template <typename N_promote, typename IU, typename NU1, typename NU2, typename _BinaryOperation>
Dcsc<IU, N_promote> EWiseApply(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
//typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
IU estnzc, estnz;
if(notB)
{
// Result is a subset of A's pattern.
estnzc = A.nzc;
estnz = A.nz;
}
else
{
// Result is contained in the intersection, bounded by either operand.
estnzc = std::min(A.nzc, B->nzc);
estnz = std::min(A.nz, B->nz);
}
Dcsc<IU,N_promote> temp(estnz, estnzc);
IU curnzc = 0;      // nonempty columns written so far
IU curnz = 0;       // nonzeros written so far
IU i = 0;           // column cursor into A
IU j = 0;           // column cursor into B
temp.cp[0] = 0;
if(!notB) // A = A .* B
{
// Merge sorted column lists; only matching columns contribute.
while(i< A.nzc && B != NULL && j<B->nzc)
{
if(A.jc[i] > B->jc[j]) ++j;
else if(A.jc[i] < B->jc[j]) ++i;
else
{
// Matching column: intersect the sorted row lists.
IU ii = A.cp[i];
IU jj = B->cp[j];
IU prevnz = curnz;
while (ii < A.cp[i+1] && jj < B->cp[j+1])
{
if (A.ir[ii] < B->ir[jj]) ++ii;
else if (A.ir[ii] > B->ir[jj]) ++jj;
else
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], B->numx[jj++]);
}
}
if(prevnz < curnz) // at least one nonzero exists in this column
{
temp.jc[curnzc++] = A.jc[i];
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
++i;
++j;
}
}
}
else // A = A .* not(B)
{
while(i< A.nzc && B != NULL && j< B->nzc)
{
if(A.jc[i] > B->jc[j]) ++j;
else if(A.jc[i] < B->jc[j])
{
// Column exists only in A: apply the operator against defaultBVal.
temp.jc[curnzc++] = A.jc[i++];
for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = __binary_op(A.numx[k], defaultBVal);
}
temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
}
else
{
// Column exists in both: keep A's rows that B does not have.
IU ii = A.cp[i];
IU jj = B->cp[j];
IU prevnz = curnz;
while (ii < A.cp[i+1] && jj < B->cp[j+1])
{
if (A.ir[ii] > B->ir[jj]) ++jj;
else if (A.ir[ii] < B->ir[jj])
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], defaultBVal);
}
else // eliminate those existing nonzeros
{
++ii;
++jj;
}
}
// B's column exhausted; the rest of A's column survives.
while (ii < A.cp[i+1])
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], defaultBVal);
}
if(prevnz < curnz) // at least one nonzero exists in this column
{
temp.jc[curnzc++] = A.jc[i];
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
++i;
++j;
}
}
// B ran out (or was NULL): remaining columns of A are all kept.
while(i< A.nzc)
{
temp.jc[curnzc++] = A.jc[i++];
for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = __binary_op(A.numx[k], defaultBVal);
}
temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
}
}
temp.Resize(curnzc, curnz);
return temp;
}
/**
 * Dimension-checked SpDCCols front end for the Dcsc-level EWiseMult.
 * Dispatches on operand emptiness: both nonempty -> delegate with B's Dcsc;
 * A nonempty, B empty, exclude set -> A .* not(0) (delegate with NULL B);
 * anything else -> empty result of the same dimensions.
 */
template<typename IU, typename NU1, typename NU2>
SpDCCols<IU, typename promote_trait<NU1,NU2>::T_promote > EWiseMult (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, bool exclude)
{
typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
assert(A.m == B.m);
assert(A.n == B.n);
Dcsc<IU, N_promote> * resdcsc = NULL;
if(A.nnz > 0)
{
if(B.nnz > 0)
{
// Both operands carry entries: do the real Dcsc-level work.
resdcsc = new Dcsc<IU, N_promote>(EWiseMult(*(A.dcsc), B.dcsc, exclude));
}
else if(exclude)
{
// B is empty and we want A .* not(B): every entry of A survives.
resdcsc = new Dcsc<IU, N_promote>(EWiseMult(*(A.dcsc), (const Dcsc<IU,NU2>*)NULL, exclude));
}
}
// In the remaining cases the result is empty; resdcsc stays NULL.
return SpDCCols<IU, N_promote> (A.m , A.n, resdcsc);
}
/**
 * Dimension-checked SpDCCols front end for the Dcsc-level EWiseApply
 * (notB variant). Mirrors EWiseMult's dispatch: with B empty the notB
 * case still produces output (A against defaultBVal), otherwise empty.
 */
template<typename N_promote, typename IU, typename NU1, typename NU2, typename _BinaryOperation>
SpDCCols<IU, N_promote> EWiseApply (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
//typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
assert(A.m == B.m);
assert(A.n == B.n);
Dcsc<IU, N_promote> * resdcsc = NULL;
if(A.nnz > 0)
{
if(B.nnz > 0)
{
// Both operands carry entries: delegate with B's internal Dcsc.
resdcsc = new Dcsc<IU, N_promote>(EWiseApply<N_promote>(*(A.dcsc), B.dcsc, __binary_op, notB, defaultBVal));
}
else if(notB)
{
// B empty but notB requested: apply A against defaultBVal.
resdcsc = new Dcsc<IU, N_promote>(EWiseApply<N_promote>(*(A.dcsc), (const Dcsc<IU,NU2>*)NULL, __binary_op, notB, defaultBVal));
}
}
// Otherwise the result is empty; resdcsc stays NULL.
return SpDCCols<IU, N_promote> (A.m , A.n, resdcsc);
}
/**
 * Implementation based on operator +=
 * Element wise apply with the following constraints
 * The operation to be performed is __binary_op
 * The operation `c = __binary_op(a, b)` is only performed if `do_op(a, b)` returns true
 * If allowANulls is true, then if A is missing an element that B has, then ANullVal is used
 * In that case the operation becomes c[i,j] = __binary_op(ANullVal, b[i,j])
 * If both allowANulls and allowBNulls is false then the function degenerates into intersection
 *
 * Both __binary_op and do_op take four arguments: (aval, bval, aIsNull, bIsNull),
 * the two booleans telling the functor which side was padded with its NullVal.
 */
template <typename RETT, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
Dcsc<IU, RETT> EWiseApply(const Dcsc<IU,NU1> * Ap, const Dcsc<IU,NU2> * Bp, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect)
{
// Degenerate cases: one or both operands entirely missing.
if (Ap == NULL && Bp == NULL)
return Dcsc<IU,RETT>(0, 0);
if (Ap == NULL && Bp != NULL)
{
// A missing everywhere: output is B's pattern, padded with ANullVal.
if (!allowANulls)
return Dcsc<IU,RETT>(0, 0);
const Dcsc<IU,NU2> & B = *Bp;
IU estnzc = B.nzc;
IU estnz = B.nz;
Dcsc<IU,RETT> temp(estnz, estnzc);
IU curnzc = 0;
IU curnz = 0;
//IU i = 0;
IU j = 0;
temp.cp[0] = 0;
while(j<B.nzc)
{
// Based on the if statement below which handles A null values.
j++;
IU prevnz = curnz;
// NOTE(review): the column index is pushed before knowing whether do_op
// keeps any entry, so a column where do_op rejects everything is recorded
// with zero nonzeros (cp[k] == cp[k+1]) -- confirm downstream code accepts that.
temp.jc[curnzc++] = B.jc[j-1];
for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
{
if (do_op(ANullVal, B.numx[k], true, false))
{
temp.ir[curnz] = B.ir[k];
temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
temp.Resize(curnzc, curnz);
return temp;
}
if (Ap != NULL && Bp == NULL)
{
// B missing everywhere: output is A's pattern, padded with BNullVal.
if (!allowBNulls)
return Dcsc<IU,RETT>(0, 0);
const Dcsc<IU,NU1> & A = *Ap;
IU estnzc = A.nzc;
IU estnz = A.nz;
Dcsc<IU,RETT> temp(estnz, estnzc);
IU curnzc = 0;
IU curnz = 0;
IU i = 0;
//IU j = 0;
temp.cp[0] = 0;
while(i< A.nzc)
{
i++;
IU prevnz = curnz;
temp.jc[curnzc++] = A.jc[i-1];
for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
{
if (do_op(A.numx[k], BNullVal, false, true))
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
temp.Resize(curnzc, curnz);
return temp;
}
// both A and B are non-NULL at this point
const Dcsc<IU,NU1> & A = *Ap;
const Dcsc<IU,NU2> & B = *Bp;
// Union of the two patterns bounds the output size.
IU estnzc = A.nzc + B.nzc;
IU estnz = A.nz + B.nz;
Dcsc<IU,RETT> temp(estnz, estnzc);
IU curnzc = 0;      // nonempty columns written so far
IU curnz = 0;       // nonzeros written so far
IU i = 0;           // column cursor into A
IU j = 0;           // column cursor into B
temp.cp[0] = 0;
// Union-merge of the sorted column lists.
while(i< A.nzc && j<B.nzc)
{
if(A.jc[i] > B.jc[j])
{
// Column only in B: emit it (padded with ANullVal) if A-nulls allowed.
j++;
if (allowANulls)
{
IU prevnz = curnz;
temp.jc[curnzc++] = B.jc[j-1];
for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
{
if (do_op(ANullVal, B.numx[k], true, false))
{
temp.ir[curnz] = B.ir[k];
temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
}
else if(A.jc[i] < B.jc[j])
{
// Column only in A: emit it (padded with BNullVal) if B-nulls allowed.
i++;
if (allowBNulls)
{
IU prevnz = curnz;
temp.jc[curnzc++] = A.jc[i-1];
for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
{
if (do_op(A.numx[k], BNullVal, false, true))
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
}
else
{
// Column present in both: union-merge the sorted row lists.
temp.jc[curnzc++] = A.jc[i];
IU ii = A.cp[i];
IU jj = B.cp[j];
IU prevnz = curnz;
while (ii < A.cp[i+1] && jj < B.cp[j+1])
{
if (A.ir[ii] < B.ir[jj])
{
// Row only in A.
if (allowBNulls && do_op(A.numx[ii], BNullVal, false, true))
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], BNullVal, false, true);
}
else
ii++;
}
else if (A.ir[ii] > B.ir[jj])
{
// Row only in B.
if (allowANulls && do_op(ANullVal, B.numx[jj], true, false))
{
temp.ir[curnz] = B.ir[jj];
temp.numx[curnz++] = __binary_op(ANullVal, B.numx[jj++], true, false);
}
else
jj++;
}
else
{
// Row in both.
if (allowIntersect && do_op(A.numx[ii], B.numx[jj], false, false))
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], B.numx[jj++], false, false); // might include zeros
}
else
{
ii++;
jj++;
}
}
}
// Drain whichever row list is left over.
while (ii < A.cp[i+1])
{
if (allowBNulls && do_op(A.numx[ii], BNullVal, false, true))
{
temp.ir[curnz] = A.ir[ii];
temp.numx[curnz++] = __binary_op(A.numx[ii++], BNullVal, false, true);
}
else
ii++;
}
while (jj < B.cp[j+1])
{
if (allowANulls && do_op(ANullVal, B.numx[jj], true, false))
{
temp.ir[curnz] = B.ir[jj];
temp.numx[curnz++] = __binary_op(ANullVal, B.numx[jj++], true, false);
}
else
jj++;
}
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
++i;
++j;
}
}
while(allowBNulls && i< A.nzc) // remaining A elements after B ran out
{
IU prevnz = curnz;
temp.jc[curnzc++] = A.jc[i++];
for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
{
if (do_op(A.numx[k], BNullVal, false, true))
{
temp.ir[curnz] = A.ir[k];
temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
while(allowANulls && j < B.nzc) // remaining B elements after A ran out
{
IU prevnz = curnz;
temp.jc[curnzc++] = B.jc[j++];
for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
{
if (do_op(ANullVal, B.numx[k], true, false))
{
temp.ir[curnz] = B.ir[k];
temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
}
}
//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
}
temp.Resize(curnzc, curnz);
return temp;
}
/**
 * SpDCCols front end for the predicate-guarded, null-padding EWiseApply.
 * Unlike the other wrappers it always delegates: the Dcsc-level routine
 * itself handles NULL operands via allowANulls / allowBNulls.
 */
template <typename RETT, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
SpDCCols<IU,RETT> EWiseApply (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect)
{
assert(A.m == B.m);
assert(A.n == B.n);
// Delegate to the Dcsc-level kernel and wrap the result in a matrix
// with A's dimensions.
Dcsc<IU, RETT> * resdcsc = new Dcsc<IU, RETT>(
EWiseApply<RETT>(A.dcsc, B.dcsc, __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect));
return SpDCCols<IU, RETT> (A.m , A.n, resdcsc);
}
}
#endif
|
2d_array_ptr_v1_omp.c | // RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 2 %t) %s.reference_output
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Fills a heap-allocated 2x2 int matrix inside an OpenMP parallel region and
 * prints it; the CATO pass transforms this into an MPI program and the output
 * is diffed against a reference file.
 *
 * Fix: printf was called without <stdio.h> being included anywhere in this
 * file -- an implicit function declaration, invalid since C99 (added via the
 * include block above).
 */
int main()
{
    /* 2x2 matrix as an array of two row pointers. */
    int** arr = malloc(sizeof(int*) * 2);
    arr[0] = malloc(sizeof(int) * 2);
    arr[1] = malloc(sizeof(int) * 2);
#pragma omp parallel
    {
        /* Every thread stores the same constants and prints the matrix;
         * NOTE(review): concurrent same-value writes are a benign-looking
         * data race kept intentionally -- this is the construct the CATO
         * transformation under test is expected to handle. */
        arr[0][0] = 0;
        arr[0][1] = 1;
        arr[1][0] = 2;
        arr[1][1] = 3;
        printf("[%d, %d]\n[%d, %d]\n", arr[0][0], arr[0][1], arr[1][0], arr[1][1]);
    }
    free(arr[0]);
    free(arr[1]);
    free(arr);
    return 0;
}
|
Chromosome.h | /*
* Chrom.h
*
* Created on: 14 de mar de 2016
* Author: luan
*/
#ifndef CHROM_H_
#define CHROM_H_
#include <iostream>
#include <vector>
#include <list>
#include <limits>
#include <algorithm>
#include <random>
#include "Data.h"
#include <omp.h>
using namespace std;
// Per-VM bookkeeping record (appears unused in this header's visible code;
// presumably consumed by other translation units -- confirm before removing).
struct vm_time {
vector<pair<double, double>> life;  // (start, end) intervals of VM lifetime
double last;                        // NOTE(review): semantics not evident here -- confirm at usage site
double static_vm = false;           // NOTE(review): declared double but initialized with a bool; likely meant to be bool -- confirm
double storage = 0.0;               // storage currently consumed on this VM
int count_file = 0;                 // number of files placed on this VM
};
// Header-scope RNG shared by all Chromosome operations.
// NOTE(review): defining these in a header violates the one-definition rule
// if the header is included from more than one translation unit.
std::random_device rd_chr;
std::mt19937 engine_chr(rd_chr());
// A candidate workflow schedule for a genetic algorithm: `allocation` maps
// tasks/files to VMs, `ordering` is a precedence-respecting task sequence,
// and `fitness` is the makespan (finish time of the sink task) computed by
// computeFitness(). `lambda` weights file-size-dependent I/O cost terms.
class Chromosome {
public:
vector<int> allocation, height_soft;
vector<double> time_vector, start_time_vector;
vector<int> ordering;
double fitness, lambda, transfer_size;
Data* data;
unordered_map<int, vector<string>> scheduler;   // vm id -> scheduled task tags
unordered_map<int, vector<int>> vm_queue;       // vm id -> task/file ids touching that vm
// Build a random chromosome: randomized soft heights, random ordering and
// allocation, then immediate fitness evaluation.
Chromosome(Data* data, double lambda) :
allocation(data->size, -1), height_soft(data->task_size, -1),
time_vector(data->task_size, -1), start_time_vector(data->task_size, -1),
ordering(0), fitness(0), lambda(lambda), transfer_size(0), data(data) {
computeHeightSoft(data->id_root);
encode();
computeFitness();
}
// Copy from a pointer (used by crossover); shares the same Data*.
Chromosome(const Chromosome* other) :
allocation(other->allocation), height_soft(other->height_soft),
time_vector(other->time_vector), start_time_vector(other->start_time_vector),
ordering(other->ordering), fitness(other->fitness),
lambda(other->lambda), transfer_size(other->transfer_size), data(other->data),
scheduler(other->scheduler), vm_queue(other->vm_queue) {}
Chromosome() {}
//virtual ~Chromosome();
virtual ~Chromosome() {
// TODO Auto-generated destructor stub
}
// Compute the fitness of chromosome
// Simulates the schedule task by task in `ordering` order, filling
// time_vector/start_time_vector and the per-VM queues; fitness becomes the
// sink task's finish time (makespan). Throws (rethrow of nothing -- i.e.
// std::terminate) on storage or precedence violations.
void computeFitness(bool check_storage = true, bool check_sequence = false) {
fill(time_vector.begin(), time_vector.end(), -1);
fill(start_time_vector.begin(), start_time_vector.end(), -1);
vector<double> queue(data->vm_size, 0);  // per-VM earliest-free time
if (check_storage && !checkFiles()) {
std::cerr << "check file error" << endl;
throw;
}
scheduler.clear();
vm_queue.clear();
// compute makespan
for (auto id_task : ordering) {//for each task, do
if (id_task != data->id_root && id_task != data->id_sink) {//if is not root or sink than
if (check_sequence && !checkTaskSeq(id_task)) {
std::cerr << "Encode error - Chrom: Error in the precedence relations." << endl;
throw;
}
// Load Vm
auto vm = data->vm_map.find(allocation[id_task])->second;
auto task = data->task_map.find(id_task)->second;
// update vm queue
auto f_queue = vm_queue.insert(make_pair(vm.id, vector<int>()));
f_queue.first->second.push_back(task.id);
// update scheduler
auto f_scheduler = scheduler.insert(make_pair(vm.id, vector<string>()));
f_scheduler.first->second.push_back(task.tag);
// Compute Task Times
auto start_time = ST(&task, &vm, queue);
auto read_time = RT(&task, &vm);
auto run_time = ceil(task.base_time * vm.slowdown);//seconds
auto write_time = WT(&task, &vm);
auto finish_time = start_time + read_time + run_time + write_time;
//update structures
time_vector[id_task] = finish_time;
start_time_vector[id_task] = start_time;
queue[vm.id] = finish_time;
} else {// root and sink tasks
if (id_task == data->id_root)
time_vector[id_task] = 0;
else {//sink task
double max_value = 0.0;
for (auto tk : data->prec.find(id_task)->second)
max_value = std::max(max_value, time_vector[tk]);
time_vector[id_task] = max_value;
}
}
}
fitness = time_vector[data->id_sink];
}
/*crossover*/
// Single-point crossover on the allocation list plus an order-preserving
// crossover on the task ordering; returns the child (this x partner).
Chromosome crossover(Chromosome* partner) {
Chromosome chr(partner);
uniform_int_distribution<> dis_ordering(0, data->task_size - 1);
uniform_int_distribution<> dis_allocation(0, data->size - 1);
int point_ordering = dis_ordering(engine_chr);//crossover point to ordering list
int point_allocation = dis_allocation(engine_chr);//crossover point to allocation list
//allocation crossover (Single point crossover)
for (int i = 0; i < point_allocation; i++) {
chr.allocation[i] = allocation[i];
}
//ordering crossover
vector<bool> aux(data->task_size, false);
chr.ordering.clear();
//ordering crossover first part self -> chr
for (auto i = 0; i < point_ordering; i++) {
chr.ordering.push_back(ordering[i]);
aux[ordering[i]] = true;
}
//Ordering crossover second part partner -> chr
for (auto i = 0; i < data->task_size; i++) {
if (!aux[partner->ordering[i]])
chr.ordering.push_back(partner->ordering[i]);
}
return chr;
}
//Mutation on allocation chromosome
// Each allocation gene is re-randomized with probability `prob`.
// NOTE(review): mixes POSIX random() for the coin flip with the mt19937
// engine for the new value -- presumably intentional, but random() is
// unseeded here; confirm.
void mutate(double prob) {
uniform_int_distribution<> idis(0, data->vm_size - 1);
for (int i = 0; i < data->size; i++) {
if (((float) random() / (float) RAND_MAX) <= prob) {
allocation[i] = idis(engine_chr);
}
}
}
// Dump fitness, per-VM task tags and per-VM file placement to stdout.
void print() {
cout << "#!# " << fitness << endl;
cout << "Tasks: " << endl;
for (auto info : data->vm_map) {
auto vm = info.second;
cout << vm.id << ": ";
auto f = scheduler.find(vm.id);
if (f != scheduler.end()) {
for (auto task_tag: f->second)
cout << task_tag << " ";
}
cout << endl;
}
cout << endl;
/*for(auto info : data->vm_map){
auto vm = info.second;
cout << "[" << vm.id << "]" << " <" << vm.name << "> : ";
auto f = scheduler.find(vm.id);
if(f != scheduler.end()) {
for (auto task_tag : f->second)
cout << task_tag << " ";
}
cout << endl;
}*/
cout << "Files: " << endl;
for (auto info: data->vm_map) {
auto vm = info.second;
cout << vm.id << ": ";
for (auto info : data->file_map) {
auto file = info.second;
int vm_id = file.is_static ? file.static_vm : allocation[file.id];
if (vm_id == vm.id)
cout << file.name << " ";
}
cout << endl;
}
/*for(auto info : data->file_map){
auto file = info.second;
int vm_id = file.is_static ? file.static_vm : allocation[file.id];
cout << "[" << vm_id << ", " << file.name << "]" << " ";
}*/
//cout << endl;
/*cout << "Task Sequence: " << endl;
for(auto task_id : ordering)
if(task_id != data->id_root && task_id && data->id_sink)
cout << data->task_map.find(task_id)->second.name << ", ";
cout << endl;*/
}
// Compute distance between two solutions
// Hamming distance on `allocation` plus the number of swaps needed to turn
// this ordering into chr's; the two terms are computed in parallel sections.
int getDistance(const Chromosome &chr) {
int distance = 0;
#pragma omp parallel sections reduction(+:distance)
{
#pragma omp section
{
// compute the distance based on position
for (int i = 0; i < data->size; i++) {
if (chr.allocation[i] != allocation[i]) {
distance += 1;
}
}
}
#pragma omp section
{
// compute the distance based on swaps required
vector<int> aux_ordering(ordering);
for (int i = 0; i < data->task_size; i++) {
if (chr.ordering[i] != aux_ordering[i]) {
distance += 1;
for (int j = i + 1; j < data->task_size; j++) {
if (chr.ordering[i] == aux_ordering[j]) {
iter_swap(aux_ordering.begin() + i, aux_ordering.begin() + j);
}
}
}
}
}
}
return distance;
}
private:
// Recursively assigns each task a "soft height": a random level between its
// true height and the minimum soft height of its successors minus one.
// Memoized through height_soft.
int computeHeightSoft(int node) {
int min = numeric_limits<int>::max();
if (height_soft[node] != -1)
return height_soft[node];
if (node != data->id_sink) {
for (auto i : data->succ.find(node)->second) {
int value = computeHeightSoft(i);
min = std::min(value, min);
}
} else {
height_soft[node] = data->height[node];
return height_soft[node];
}
uniform_int_distribution<> dis(data->height[node], min - 1);
height_soft[node] = dis(engine_chr);
return height_soft[node];
}
// Random encode (new chromosome)
// Ordering = task ids sorted by soft height (precedence-safe by
// construction); allocation = uniformly random VM per task/file.
void encode() {
vector<int> seq_list(boost::counting_iterator<int>(0u), boost::counting_iterator<int>(height_soft.size()));
sort(begin(seq_list), end(seq_list), [&](const int &i, const int &j) {
return height_soft[i] < height_soft[j];
});//sort a list based on height soft
for (auto task : seq_list)//encode ordering Chromosome
ordering.push_back(task);
//Encode allocation chromosome
uniform_int_distribution<> dis(0, data->vm_size - 1);
for (int i = 0; i < data->size; i++)
allocation[i] = dis(engine_chr);
}
/* Checks the sequence of tasks is valid */
// A task is valid to run only if every predecessor already has a finish time.
inline bool checkTaskSeq(int task) {
for (auto tk : data->prec.find(task)->second)
if (time_vector[tk] == -1)
return false;
return true;
}
// Check and organize the file based on the storage capacity
// Greedy repair: while some VM is over capacity, move its smallest dynamic
// file to the emptiest VM. Returns true iff a feasible placement was reached
// within data->file_size iterations. (The cout calls look like leftover
// debug output.)
inline bool checkFiles() {
bool flag = true;
int count = 0;
vector<double> aux_storage(data->storage_vet);  // remaining capacity per VM
vector<int> aux(data->vm_size);
iota(aux.begin(), aux.end(), 0); // 0,1,2,3,4 ... n
unordered_map<int, vector<int> > map_file;      // vm id -> dynamic file ids
int id_vm;
// build file map and compute the storage
for (auto it : data->file_map) {
if (!it.second.is_static) {
id_vm = allocation[it.second.id];
auto f = map_file.insert(make_pair(id_vm, vector<int>()));
f.first->second.push_back(it.second.id);
auto file = data->file_map.find(it.second.id)->second;
aux_storage[id_vm] -= file.size;
}
}
do {
//sort machines based on the storage capacity
sort(aux.begin(), aux.end(), [&](const int &a, const int &b) {
return aux_storage[a] < aux_storage[b];
});
if (aux_storage[aux[0]] < 0) {//if storage is full, start the file heuristic
cout << "Starting file heuristic ..." << endl;
int old_vm = aux[0]; //critical machine
int new_vm = aux[aux.size() - 1];//best machine
auto vet_file = map_file.find(old_vm)->second;
double min = numeric_limits<double>::max();
int min_file = -1;
//search min file (based on the size of file)
for_each(vet_file.begin(), vet_file.end(), [&](int i) {
cout << i << endl;
auto file = data->file_map.find(i)->second;
cout << file.name << endl;
if (file.size < min) {
min = file.size;
min_file = file.id;
}
});
auto file_min = data->file_map.find(min_file)->second;
cout << file_min.name << endl;
//minFile will be move to machine with more empty space
allocation[file_min.id] = new_vm;
//update aux Storage
aux_storage[old_vm] += file_min.size;
aux_storage[new_vm] -= file_min.size;
//Update mapFile structure
map_file[old_vm].erase(remove(map_file[old_vm].begin(), map_file[old_vm].end(), min_file),
map_file[old_vm].end());
map_file[new_vm].push_back(min_file);
} else flag = false;
count++;
} while (flag && count < data->file_size);
return !flag;
}
// Start Time
// Earliest start = max(latest predecessor finish, VM's earliest-free time).
inline double ST(Task* task, VMachine* vm, const vector<double> &queue) {
// compute wait time
double max_pred_time = 0.0;
for (auto tk : data->prec.find(task->id)->second)
max_pred_time = std::max(max_pred_time, time_vector[tk]);
return std::max(max_pred_time, queue[vm->id]);
}
// Read time
// Sum over input files of transfer time from the file's VM plus a
// lambda-weighted per-byte cost; also records the file in vm_queue.
inline double RT(Task* task, VMachine* vm) {
//compute read time
double read_time = 0;
int id_vm_file;
for (auto id_file : task->input) {
auto file = data->file_map.find(id_file)->second;
if (!file.is_static) {
id_vm_file = allocation[file.id];
// update vm queue
auto f_queue = vm_queue.insert(make_pair(id_vm_file, vector<int>()));
f_queue.first->second.push_back(file.id);
} else
id_vm_file = file.static_vm;
auto vm_file = data->vm_map.find(id_vm_file)->second;
read_time += ceil(TT(&file, vm, &vm_file) + (file.size * lambda));
}
return read_time;
} //Write time
// Write time
// Same as RT for output files, with the per-byte cost doubled (lambda * 2).
inline double WT(Task* task, VMachine* vm) {
//compute the write time
double write_time = 0;
for (auto id_file :task->output) {
auto file = data->file_map.find(id_file)->second;
auto vm_file = data->vm_map.find(allocation[file.id])->second;
// update vm queue
auto f_queue = vm_queue.insert(make_pair(vm_file.id, vector<int>()));
f_queue.first->second.push_back(id_file);
write_time += ceil(TT(&file, vm, &vm_file) + (file.size * (lambda * 2)));
}
return write_time;
}
// Transfer Time
// size / min(bandwidth) across the two VMs; zero when both are the same VM.
inline double TT(File* file, VMachine* vm1, VMachine* vm2) {
if (vm1->id != vm2->id) {// if vm1 != vm2
//get the smallest link
auto link = std::min(vm1->bandwidth, vm2->bandwidth);
return file->size / link;
}
return 0;//otherwise
}
};
#endif /* CHROM_H_ */
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 * Note: Y is normalized in place (this mutation is part of the classic
 * glibc-manual idiom and is preserved here).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from y's seconds until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry into y's seconds when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (adjusted) seconds fall below y's. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the tiled order-2, 3D 25-point stencil: allocates the grids,
 * runs the PLUTO/CLooG-generated time-tiled loop nest TESTS times, and
 * reports the best wall-clock time.
 *
 * NOTE(review): Nx/Ny/Nz are read uninitialized when argc <= 3 and Nt when
 * argc <= 4 (undefined behavior); the benchmark presumably always receives
 * all four arguments -- confirm or add defaults.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// A holds two time planes (ping-pong via t % 2); roc2 is the coefficient grid.
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
// NOTE(review): this realloc-free reassignment leaks the single-element
// block allocated to roc2 two lines above.
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
// Seeded deterministically so every run works on the same data.
// NOTE(review): index 0 of each dimension (and all of A[1]) is left
// uninitialized; presumably the loop bounds keep reads inside written
// cells after the first sweep -- inherited from the original benchmark.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
// Finite-difference coefficients of the order-2 (25-point) stencil.
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
// Generated time-tiled loop nest (do not hand-edit): t1..t4 enumerate
// tiles, t5 is time, t6..t8 are the z/y/x points inside a tile; the inner
// loop is parallelized over t2 tiles and vectorized over t8.
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) {
for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(32*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(32*t3+Nx+19,256));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),64*t4+62);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(256*t4,4*t5+4);
ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
Utility.h | /*
* SVRTK : SVR reconstruction based on MIRTK
*
* Copyright 2021- King's College London
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// SVRTK
#include "svrtk/Common.h"
using namespace std;
using namespace mirtk;
// Forward declarations
namespace svrtk {
class Reconstruction;
}
namespace svrtk::Utility {
/**
* @brief Extract specific image ROI.
* @param stack
* @param transformation
* @param min_x
* @param min_y
* @param min_z
* @param max_x
* @param max_y
* @param max_z
*/
void bbox(const RealImage& stack, const RigidTransformation& transformation, double& min_x, double& min_y, double& min_z, double& max_x, double& max_y, double& max_z);
/**
* @brief Crop to non zero ROI.
* @param image
*/
void bboxCrop(RealImage& image);
/**
* @brief Find centroid.
* @param image
* @param x
* @param y
* @param z
*/
void centroid(const RealImage& image, double& x, double& y, double& z);
/**
* @brief Center stacks.
* @param stacks
* @param stack_transformations
* @param templateNumber
*/
void CenterStacks(const Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations, int templateNumber);
/// Crops the image according to the mask
void CropImage(RealImage& image, const RealImage& mask);
/// GF 190416, useful for handling different slice orders
void CropImageIgnoreZ(RealImage& image, const RealImage& mask);
/**
* @brief Compute NCC between images.
* @param slice_1
* @param slice_2
* @param threshold
* @param count
* @return
*/
double ComputeNCC(const RealImage& slice_1, const RealImage& slice_2, const double threshold = 0.01, double *count = nullptr);
double LocalSSIM(const RealImage slice, const RealImage sim_slice );
/**
* @brief Compute inter-slice volume NCC (motion metric).
* @param input_stack
* @param template_stack
* @param mask
* @return
*/
double VolumeNCC(RealImage& input_stack, RealImage template_stack, const RealImage& mask);
/**
* @brief Compute internal stack statistics (volume and inter-slice NCC).
* @param input_stack
* @param mask
* @param mask_volume
* @param slice_ncc
*/
void StackStats(RealImage input_stack, RealImage& mask, double& mask_volume, double& slice_ncc);
/**
* @brief Run serial global similarity statistics (for the stack selection function).
* @param template_stack
* @param template_mask
* @param stacks
* @param masks
* @param average_ncc
* @param average_volume
* @param current_stack_transformations
*/
void GlobalStackStats(RealImage template_stack, const RealImage& template_mask, const Array<RealImage>& stacks,
const Array<RealImage>& masks, double& average_ncc, double& average_volume,
Array<RigidTransformation>& current_stack_transformations);
// Cropping of stacks based on intersection
void StackIntersection(Array<RealImage>& stacks, RealImage template_mask);
/**
* @brief Run parallel global similarity statistics.
* @param stacks
* @param masks
* @param all_global_ncc_array
* @param all_global_volume_array
*/
void RunParallelGlobalStackStats(const Array<RealImage>& stacks, const Array<RealImage>& masks,
Array<double>& all_global_ncc_array, Array<double>& all_global_volume_array);
/**
* @brief Create average image from the stacks and volumetric transformations.
* @param reconstructor
* @param stacks
* @param stack_transformations
* @return
*/
RealImage CreateAverage(const Reconstruction *reconstructor, const Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations);
/**
* @brief Create a mask from dark/black background.
* @param reconstructor
* @param stacks
* @param stack_transformations
* @param smooth_mask
*/
void CreateMaskFromBlackBackground(const Reconstruction *reconstructor, const Array<RealImage>& stacks, Array<RigidTransformation> stack_transformations, double smooth_mask);
/**
* @brief Transform and resample mask to the space of the image.
* @param image
* @param mask
* @param transformation
*/
void TransformMask(const RealImage& image, RealImage& mask, const RigidTransformation& transformation);
/**
* @brief Run stack background filtering (GS based).
* @param stacks
* @param fg_sigma
* @param bg_sigma
*/
void BackgroundFiltering(Array<RealImage>& stacks, const double fg_sigma, const double bg_sigma);
/**
* @brief Mask stacks with respect to the reconstruction mask and given transformations.
* @param stacks
* @param stack_transformations
* @param mask
*/
void MaskStacks(Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations, const RealImage& mask);
/**
* @brief Mask slices based on the reconstruction mask.
* @param slices
* @param mask
* @param Transform
*/
void MaskSlices(Array<RealImage>& slices, const RealImage& mask, function<void(size_t, double&, double&, double&)> Transform);
/**
* @brief Get slice order parameters.
* @param stacks
* @param pack_num
* @param order
* @param step
* @param rewinder
* @param output_z_slice_order
* @param output_t_slice_order
*/
void GetSliceAcquisitionOrder(const Array<RealImage>& stacks, const Array<int>& pack_num, const Array<int>& order,
const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order);
/**
* @brief Split images into varying N packages.
* @param stacks
* @param sliceStacks
* @param pack_num
* @param sliceNums
* @param order
* @param step
* @param rewinder
* @param output_z_slice_order
* @param output_t_slice_order
*/
void FlexibleSplitImage(const Array<RealImage>& stacks, Array<RealImage>& sliceStacks, const Array<int>& pack_num,
const Array<int>& sliceNums, const Array<int>& order, const int step, const int rewinder,
Array<int>& output_z_slice_order, Array<int>& output_t_slice_order);
/**
* @brief Split images based on multi-band acquisition.
* @param stacks
* @param sliceStacks
* @param pack_num
* @param sliceNums
* @param multiband_vector
* @param order
* @param step
* @param rewinder
* @param output_z_slice_order
* @param output_t_slice_order
*/
void FlexibleSplitImagewithMB(const Array<RealImage>& stacks, Array<RealImage>& sliceStacks, const Array<int>& pack_num,
const Array<int>& sliceNums, const Array<int>& multiband_vector, const Array<int>& order, const int step, const int rewinder,
Array<int>& output_z_slice_order, Array<int>& output_t_slice_order);
/**
* @brief Split stacks into packages based on specific order.
* @param stacks
* @param pack_num
* @param packageStacks
* @param order
* @param step
* @param rewinder
* @param output_z_slice_order
* @param output_t_slice_order
*/
void SplitPackages(const Array<RealImage>& stacks, const Array<int>& pack_num, Array<RealImage>& packageStacks, const Array<int>& order,
const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order);
/**
* @brief Split stacks into packages based on specific order (multi-band based).
* @param stacks
* @param pack_num
* @param packageStacks
* @param multiband_vector
* @param order
* @param step
* @param rewinder
* @param output_z_slice_order
* @param output_t_slice_order
*/
void SplitPackageswithMB(const Array<RealImage>& stacks, const Array<int>& pack_num, Array<RealImage>& packageStacks, const Array<int>& multiband_vector,
const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order);
/**
* @brief Split image into N packages.
* @param image
* @param packages
* @param stacks
*/
void SplitImage(const RealImage& image, const int packages, Array<RealImage>& stacks);
/**
* @brief Split image into 2 packages.
* @param image
* @param packages
* @param stacks
*/
void SplitImageEvenOdd(const RealImage& image, const int packages, Array<RealImage>& stacks);
/**
* @brief Split image into 4 packages.
* @param image
* @param packages
* @param stacks
* @param iter
*/
void SplitImageEvenOddHalf(const RealImage& image, const int packages, Array<RealImage>& stacks, const int iter);
/**
* @brief Split image into 2 packages.
* @param image
* @param stacks
*/
void HalfImage(const RealImage& image, Array<RealImage>& stacks);
////////////////////////////////////////////////////////////////////////////////
// Inline/template definitions
////////////////////////////////////////////////////////////////////////////////
/// Empty a vector and pre-allocate capacity for `reserveSize` elements.
template<typename VectorType>
inline void ClearAndReserve(vector<VectorType>& vectorVar, size_t reserveSize) {
    vectorVar.clear();              // drop all elements (existing capacity is kept)
    vectorVar.reserve(reserveSize); // grow capacity up-front to avoid reallocations
}
//-------------------------------------------------------------------
/// Replace a vector's contents with `reserveSize` copies of `defaultValue`.
template<typename VectorType>
inline void ClearAndResize(vector<VectorType>& vectorVar, size_t reserveSize, const VectorType& defaultValue = VectorType()) {
    // assign() is equivalent to clear() followed by resize(n, value), in one call
    vectorVar.assign(reserveSize, defaultValue);
}
//-------------------------------------------------------------------
/**
 * @brief Binarise mask.
 * If the template image has been masked instead of creating the mask in a
 * separate file, this function can be used to create a mask from the template image.
 * @param image image to binarise (taken by value; the copy becomes the mask)
 * @param threshold voxels strictly above this become 1, all others 0
 * @return binary mask image
 */
inline RealImage CreateMask(RealImage image, double threshold = 0.5) {
    RealPixel *data = image.Data();
    const int nvox = image.NumberOfVoxels();
    // each voxel is independent, so binarise in parallel
    #pragma omp parallel for
    for (int v = 0; v < nvox; v++)
        data[v] = (data[v] > threshold) ? 1 : 0;
    return image;
}
//-------------------------------------------------------------------
/// Scale the image so its maximum is 1 (when positive), then binarise at `threshold`.
inline RealImage ThresholdNormalisedMask(RealImage image, double threshold) {
    RealPixel lo, hi;
    image.GetMinMax(&lo, &hi);
    // only normalise when there is a positive maximum to divide by
    if (hi > 0)
        image /= hi;
    return CreateMask(image, threshold);
}
//-------------------------------------------------------------------
/// Reset image origin to (0,0,0) and record the old origin as a pure translation
/// in the output transformation (RealImage/GreyImage/ByteImage).
template<typename ImageType>
inline void ResetOrigin(GenericImage<ImageType>& image, RigidTransformation& transformation) {
    // capture the current origin before clearing it
    double x, y, z;
    image.GetOrigin(x, y, z);
    image.PutOrigin(0, 0, 0);
    // store the previous origin as translation only — rotation is zeroed out
    transformation.PutTranslationX(x);
    transformation.PutTranslationY(y);
    transformation.PutTranslationZ(z);
    transformation.PutRotationX(0);
    transformation.PutRotationY(0);
    transformation.PutRotationZ(0);
}
//-------------------------------------------------------------------
/// Perform nonlocal means filtering on each stack (in place) and write each
/// denoised stack to "denoised-<i>.nii.gz" for inspection.
inline void NLMFiltering(Array<RealImage>& stacks) {
    // NOTE: use size_t for the loop index, matching stacks.size() and the
    // sibling OpenMP loops in this header (avoids a signed/unsigned comparison)
    #pragma omp parallel for
    for (size_t i = 0; i < stacks.size(); i++) {
        // run NLM denoising with parameters 3 and 1 (see NLDenoising::Run)
        stacks[i] = NLDenoising::Run(stacks[i], 3, 1);
        stacks[i].Write((boost::format("denoised-%1%.nii.gz") % i).str().c_str());
    }
}
//-------------------------------------------------------------------
/// Invert every stack transformation in place.
inline void InvertStackTransformations(Array<RigidTransformation>& stack_transformations) {
    // each transformation is independent, so invert them in parallel
    #pragma omp parallel for
    for (size_t t = 0; t < stack_transformations.size(); t++) {
        RigidTransformation& transf = stack_transformations[t];
        transf.Invert();
        // refresh the cached parameters after the inversion
        transf.UpdateParameter();
    }
}
//-------------------------------------------------------------------
/// Mask input volume: voxels where the mask is zero are replaced with `padding`.
inline void MaskImage(RealImage& image, const RealImage& mask, double padding = -1) {
    // image and mask must cover the same voxel grid
    if (image.NumberOfVoxels() != mask.NumberOfVoxels())
        throw runtime_error("Cannot mask the image - different dimensions");
    RealPixel *img = image.Data();
    const RealPixel *msk = mask.Data();
    const int nvox = image.NumberOfVoxels();
    #pragma omp parallel for
    for (int v = 0; v < nvox; v++) {
        if (msk[v] == 0)
            img[v] = padding;
    }
}
//-------------------------------------------------------------------
/// Rescale image so its maximum becomes `max`, ignoring non-positive voxels.
inline void Rescale(RealImage& img, double max) {
    // find the current intensity range (only the maximum is used below)
    RealPixel low, high;
    img.GetMinMax(&low, &high);
    RealPixel *data = img.Data();
    const int nvox = img.NumberOfVoxels();
    #pragma omp parallel for
    for (int v = 0; v < nvox; v++) {
        // only positive intensities are rescaled; zeros/negatives are untouched
        // (any positive voxel implies high > 0, so the division is safe)
        if (data[v] > 0)
            data[v] = double(data[v]) / double(high) * max;
    }
}
//-------------------------------------------------------------------
/// Apply a static (3D) mask to a 4D volume: where the mask is zero, every
/// time point of that voxel is set to `padding`.
inline void StaticMaskVolume4D(RealImage& volume, const RealImage& mask, const double padding) {
    #pragma omp parallel for
    for (int x = 0; x < volume.GetX(); x++) {
        for (int y = 0; y < volume.GetY(); y++) {
            for (int z = 0; z < volume.GetZ(); z++) {
                if (mask(x, y, z) != 0)
                    continue;
                // masked-out voxel: pad the whole time series
                for (int t = 0; t < volume.GetT(); t++)
                    volume(x, y, z, t) = padding;
            }
        }
    }
}
}
|
GB_AxB_saxpy3_slice_balanced.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.
#include "GB_AxB_saxpy3.h"
// control parameters for generating parallel tasks
#define GB_NTASKS_PER_THREAD 2
#define GB_COSTLY 1.2
#define GB_FINE_WORK 2
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Fine_fl, int64_t) ; \
GB_WERK_POP (Fine_slice, int64_t) ; \
GB_WERK_POP (Coarse_Work, int64_t) ; \
GB_WERK_POP (Coarse_initial, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_FREE_WORK (&SaxpyTasks, SaxpyTasks_size) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically. AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods. However, if Hash is selected but the hash table
// equals or exceeds cvlen, then Gustavson's method is used instead.
static inline int64_t GB_hash_table_size
(
    int64_t flmax,      // max flop count for any vector computed by this task
    int64_t cvlen,      // vector length of C
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{
    //--------------------------------------------------------------------------
    // Gustavson forced, or flmax large: use the full cvlen (no hash table)
    //--------------------------------------------------------------------------
    if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
    {
        return (cvlen) ;
    }
    //--------------------------------------------------------------------------
    // flmax is small; consider hash vs Gustavson
    //--------------------------------------------------------------------------
    // candidate hash table size: 2 * (smallest power of 2 >= flmax)
    int64_t hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;
    // threshold at which the hash table is considered too large:
    // cvlen when Hash was explicitly requested; cvlen/12 for auto selection
    int64_t limit = (AxB_method == GxB_AxB_HASH) ? cvlen : cvlen/12 ;
    //--------------------------------------------------------------------------
    // return result: Gustavson (cvlen) if the hash table would be too big
    //--------------------------------------------------------------------------
    return ((hash_size >= limit) ? cvlen : hash_size) ;
}
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
static inline void GB_create_coarse_task
(
    int64_t kfirst,     // coarse task consists of vectors kfirst:klast
    int64_t klast,
    GB_saxpy3task_struct *SaxpyTasks,
    int taskid,         // taskid for this coarse task
    int64_t *Bflops,    // size bnvec; cum sum of flop counts for vectors of B
    int64_t cvlen,      // vector length of B and C
    double chunk,
    int nthreads_max,
    int64_t *Coarse_Work,   // workspace for parallel reduction for flop count
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{
    //--------------------------------------------------------------------------
    // find the max # of flops for any vector in this task
    //--------------------------------------------------------------------------
    // nk = # of vectors kfirst:klast handled by this coarse task
    int64_t nk = klast - kfirst + 1 ;
    int nth = GB_nthreads (nk, chunk, nthreads_max) ;
    // each thread finds the max flop count for a subset of the vectors
    int tid ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (tid = 0 ; tid < nth ; tid++)
    {
        // start the per-thread running max at 1, not 0
        int64_t my_flmax = 1, istart, iend ;
        GB_PARTITION (istart, iend, nk, tid, nth) ;
        for (int64_t i = istart ; i < iend ; i++)
        {
            int64_t kk = kfirst + i ;
            // flop count for vector kk, from the cumulative sum in Bflops
            int64_t fl = Bflops [kk+1] - Bflops [kk] ;
            my_flmax = GB_IMAX (my_flmax, fl) ;
        }
        Coarse_Work [tid] = my_flmax ;
    }
    // combine results from each thread: serial reduction over nth partial maxima
    int64_t flmax = 1 ;
    for (tid = 0 ; tid < nth ; tid++)
    {
        flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
    }
    // check the parallel computation (debug builds recompute flmax serially)
    #ifdef GB_DEBUG
    int64_t flmax2 = 1 ;
    for (int64_t kk = kfirst ; kk <= klast ; kk++)
    {
        int64_t fl = Bflops [kk+1] - Bflops [kk] ;
        flmax2 = GB_IMAX (flmax2, fl) ;
    }
    ASSERT (flmax == flmax2) ;
    #endif
    //--------------------------------------------------------------------------
    // define the coarse task
    //--------------------------------------------------------------------------
    SaxpyTasks [taskid].start = kfirst ;
    SaxpyTasks [taskid].end = klast ;
    SaxpyTasks [taskid].vector = -1 ;   // -1: coarse task (fine tasks store kk)
    SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ;
    SaxpyTasks [taskid].Hi = NULL ;     // assigned later
    SaxpyTasks [taskid].Hf = NULL ;     // assigned later
    SaxpyTasks [taskid].Hx = NULL ;     // assigned later
    SaxpyTasks [taskid].my_cjnz = 0 ;   // for fine tasks only
    SaxpyTasks [taskid].leader = taskid ;   // a coarse task is its own leader
    SaxpyTasks [taskid].team_size = 1 ;     // and works alone
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------
GrB_Info GB_AxB_saxpy3_slice_balanced
(
// inputs
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash
// outputs
GB_saxpy3task_struct **SaxpyTasks_handle,
size_t *SaxpyTasks_size_handle,
bool *apply_mask, // if true, apply M during sapxy3
bool *M_in_place, // if true, use M in-place
int *ntasks, // # of tasks created (coarse and fine)
int *nfine, // # of fine tasks created
int *nthreads, // # of threads to use
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
(*apply_mask) = false ;
(*M_in_place) = false ;
(*ntasks) = 0 ;
(*nfine) = 0 ;
(*nthreads) = 0 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
ASSERT (!GB_ZOMBIES (B)) ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// define result and workspace
//--------------------------------------------------------------------------
GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
size_t SaxpyTasks_size = 0 ;
GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks
GB_WERK_DECLARE (Coarse_Work, int64_t) ; // workspace for flop counts
GB_WERK_DECLARE (Fine_slice, int64_t) ;
GB_WERK_DECLARE (Fine_fl, int64_t) ; // size max(nnz(B(:,j)))
//--------------------------------------------------------------------------
// get A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t avlen = A->vlen ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int8_t *restrict Bb = B->b ;
const int64_t *restrict Bi = B->i ;
const int64_t bvdim = B->vdim ;
const int64_t bnz = GB_nnz_held (B) ;
const int64_t bnvec = B->nvec ;
const int64_t bvlen = B->vlen ;
const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
int64_t cvlen = avlen ;
int64_t cvdim = bvdim ;
//--------------------------------------------------------------------------
// compute flop counts for each vector of B and C
//--------------------------------------------------------------------------
int64_t Mwork = 0 ;
int64_t *restrict Bflops = C->p ; // use C->p as workspace for Bflops
GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
Context)) ;
int64_t total_flops = Bflops [bnvec] ;
double axbflops = total_flops - Mwork ;
GBURBLE ("axbwork %g ", axbflops) ;
if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;
//--------------------------------------------------------------------------
// determine if the mask M should be applied, or done later
//--------------------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
(*apply_mask) = false ;
}
else if (GB_IS_BITMAP (M) || GB_as_if_full (M))
{
//----------------------------------------------------------------------
// M is present and full, bitmap, or sparse/hyper with all entries
//----------------------------------------------------------------------
// Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.
(*apply_mask) = true ;
// The work for M has not yet been added Bflops.
// Each vector M(:,j) has cvlen entries.
Mwork = cvlen * cvdim ;
if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
{
if (axbflops < (double) Mwork * GB_MWORK_BETA)
{
// The mask is too costly to scatter into the Hf workspace.
// Leave it in place and use all-hash tasks.
AxB_method = GxB_AxB_HASH ;
}
else
{
// Scatter M into Hf and use all-Gustavson tasks.
AxB_method = GxB_AxB_GUSTAVSON ;
}
}
if (AxB_method == GxB_AxB_HASH)
{
// Use the hash method for all tasks (except for those tasks which
// require a hash table size >= cvlen; those tasks use Gustavson).
// Do not scatter the mask into the Hf hash workspace. The work
// for the mask is not accounted for in Bflops, so the hash tables
// can be small.
(*M_in_place) = true ;
GBURBLE ("(use mask in-place) ") ;
}
else
{
// Use the Gustavson method for all tasks, and scatter M into the
// fine Gustavson workspace. The work for M is not yet in the
// Bflops cumulative sum. Add it now.
ASSERT (AxB_method == GxB_AxB_GUSTAVSON)
int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
int64_t kk ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (kk = 0 ; kk <= bnvec ; kk++)
{
Bflops [kk] += cvlen * (kk+1) ;
}
total_flops = Bflops [bnvec] ;
GBURBLE ("(use mask) ") ;
}
}
else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
{
//----------------------------------------------------------------------
// M is costly to use; apply it after C=A*B
//----------------------------------------------------------------------
// Do not use M during the computation of A*B. Instead, compute C=A*B
// and then apply the mask later. Tell the caller that the mask should
// not be applied, so that it will be applied later in GB_mxm.
(*apply_mask) = false ;
GBURBLE ("(discard mask) ") ;
GB_FREE_ALL ;
return (GrB_NO_VALUE) ;
}
else
{
//----------------------------------------------------------------------
// use M during saxpy3
//----------------------------------------------------------------------
(*apply_mask) = true ;
GBURBLE ("(use mask) ") ;
}
//--------------------------------------------------------------------------
// determine # of threads and # of initial coarse tasks
//--------------------------------------------------------------------------
(*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
int ntasks_initial = ((*nthreads) == 1) ? 1 :
(GB_NTASKS_PER_THREAD * (*nthreads)) ;
//--------------------------------------------------------------------------
// give preference to Gustavson when using few threads
//--------------------------------------------------------------------------
if ((*nthreads) <= 8 &&
(!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
{
// Unless a specific method has been explicitly requested, see if
// Gustavson should be used with a small number of threads.
// Matrix-vector has a maximum intensity of 1, so this heuristic only
// applies to GrB_mxm.
double abnz = GB_nnz (A) + GB_nnz (B) + 1 ;
double workspace = (double) ntasks_initial * (double) cvlen ;
double intensity = total_flops / abnz ;
GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
intensity, workspace / abnz) ;
if (intensity >= 8 && workspace < abnz)
{
// work intensity is large, and Gustvason workspace is modest;
// use Gustavson for all tasks
AxB_method = GxB_AxB_GUSTAVSON ;
GBURBLE (": select Gustvason) ") ;
}
else
{
// use default task creation: mix of Hash and Gustavson
GBURBLE (") ") ;
}
}
//--------------------------------------------------------------------------
// determine target task size
//--------------------------------------------------------------------------
double target_task_size = ((double) total_flops) / ntasks_initial ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
double target_fine_size = target_task_size / GB_FINE_WORK ;
target_fine_size = GB_IMAX (target_fine_size, chunk) ;
//--------------------------------------------------------------------------
// determine # of parallel tasks
//--------------------------------------------------------------------------
int ncoarse = 0 ; // # of coarse tasks
int max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks
// FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// construct initial coarse tasks
//----------------------------------------------------------------------
GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
if (Coarse_initial == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;
//----------------------------------------------------------------------
// split the work into coarse and fine tasks
//----------------------------------------------------------------------
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
// where j == GBH (Bh, kk)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// vectors kcoarse_start to kk-1 form a single
// coarse task
ncoarse++ ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// vectors kk will be split into multiple fine tasks
max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
int team_size = ceil (jflops / target_fine_size) ;
(*nfine) += team_size ;
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// vectors kcoarse_start to klast-1 form a single
// coarse task
ncoarse++ ;
}
}
else
{
// This coarse task is OK as-is.
ncoarse++ ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
if (bnvec == 1)
{
// If B is a single vector, and is computed by a single thread,
// then a single fine task is used.
(*nfine) = 1 ;
ncoarse = 0 ;
}
else
{
// One thread uses a single coarse task if B is not a vector.
(*nfine) = 0 ;
ncoarse = 1 ;
}
}
(*ntasks) = ncoarse + (*nfine) ;
//--------------------------------------------------------------------------
// allocate the tasks, and workspace to construct fine tasks
//--------------------------------------------------------------------------
SaxpyTasks = GB_MALLOC_WORK ((*ntasks), GB_saxpy3task_struct,
&SaxpyTasks_size) ;
GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
if (max_bjnz > 0)
{
// also allocate workspace to construct fine tasks
GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
// Fine_fl will only fit on the Werk stack if max_bjnz is small,
// but try anyway, in case it fits. It is placed at the top of the
// Werk stack.
GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
}
if (SaxpyTasks == NULL || Coarse_Work == NULL ||
(max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// clear SaxpyTasks
memset (SaxpyTasks, 0, SaxpyTasks_size) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// create the coarse and fine tasks
//----------------------------------------------------------------------
int nf = 0 ; // fine tasks have task id 0:nfine-1
int nc = (*nfine) ; // coarse task ids are nfine:ntasks-1
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// kcoarse_start:kk-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, kk-1,
SaxpyTasks, nc++, Bflops, cvlen, chunk,
nthreads_max, Coarse_Work, AxB_method) ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// count the work for each entry B(k,j). Do not
// include the work to scan M(:,j), since that will
// be evenly divided between all tasks in this team.
int64_t pB_start = GBP (Bp, kk, bvlen) ;
int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
int64_t s ;
#pragma omp parallel for num_threads(nth) \
schedule(static)
for (s = 0 ; s < bjnz ; s++)
{
// get B(k,j)
Fine_fl [s] = 1 ;
int64_t pB = pB_start + s ;
if (!GBB (Bb, pB)) continue ;
int64_t k = GBI (Bi, pB, bvlen) ;
// fl = flop count for just A(:,k)*B(k,j)
int64_t pA, pA_end ;
int64_t pleft = 0 ;
GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
anvec-1, k, &pA, &pA_end) ;
int64_t fl = pA_end - pA ;
Fine_fl [s] = fl ;
ASSERT (fl >= 0) ;
}
// cumulative sum of flops to compute A*B(:,j)
GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;
// slice B(:,j) into fine tasks
int team_size = ceil (jflops / target_fine_size) ;
ASSERT (Fine_slice != NULL) ;
GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);
// shared hash table for all fine tasks for A*B(:,j)
int64_t hsize =
GB_hash_table_size (jflops, cvlen, AxB_method) ;
// construct the fine tasks for C(:,j)=A*B(:,j)
int leader = nf ;
for (int fid = 0 ; fid < team_size ; fid++)
{
int64_t pstart = Fine_slice [fid] ;
int64_t pend = Fine_slice [fid+1] ;
int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
SaxpyTasks [nf].start = pB_start + pstart ;
SaxpyTasks [nf].end = pB_start + pend - 1 ;
SaxpyTasks [nf].vector = kk ;
SaxpyTasks [nf].hsize = hsize ;
SaxpyTasks [nf].Hi = NULL ; // assigned later
SaxpyTasks [nf].Hf = NULL ; // assigned later
SaxpyTasks [nf].Hx = NULL ; // assigned later
SaxpyTasks [nf].my_cjnz = 0 ;
SaxpyTasks [nf].leader = leader ;
SaxpyTasks [nf].team_size = team_size ;
nf++ ;
}
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// kcoarse_start:klast-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
else
{
// This coarse task is OK as-is.
GB_create_coarse_task (kfirst, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
// create a single coarse task: hash or Gustavson
GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1,
Coarse_Work, AxB_method) ;
if (bnvec == 1)
{
// convert the single coarse task into a single fine task
SaxpyTasks [0].start = 0 ; // first entry in B(:,0)
SaxpyTasks [0].end = bnz - 1 ; // last entry in B(:,0)
SaxpyTasks [0].vector = 0 ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
(*SaxpyTasks_handle) = SaxpyTasks ;
(*SaxpyTasks_size_handle) = SaxpyTasks_size ;
return (GrB_SUCCESS) ;
}
|
aspl.c | #include "common.h"
static bool _enable_avx2 = false, _is_profile = false, _enable_disconnected = false;
static char* _bitmap = NULL;
static int _hosts, _switches, _based_switches, _radix, _symmetries, _kind, _elements, _times;
static int *_frontier = NULL, *_distance = NULL, *_next = NULL;
static uint64_t *_A, *_B;
static double _elapsed_time;
// Print the bits of v (most-significant first) followed by a newline.
// Debug helper; writes to stdout.
static void printb(unsigned int v) {
  // BUG FIX: the original computed the mask as (int)1 << 31.  Left-shifting
  // a 1 into the sign bit of a signed int is undefined behavior in C; use an
  // unsigned constant so the shift is well-defined.
  unsigned int mask = 1u << (sizeof(v) * CHAR_BIT - 1);
  do putchar(mask & v ? '1' : '0');
  while (mask >>= 1);
  printf("\n");
}
void ORP_Profile(const char* name, const int kind, const int nodes, const int symmetries,
const double elapsed_time, const unsigned int times);
void ORP_Matmul(const uint64_t *restrict A, uint64_t *restrict B, const int switches, const int radix,
const int *restrict ports, const int *restrict adjacency, const int elements,
const bool enable_avx2);
void ORP_Matmul_s(const uint64_t *restrict A, uint64_t *restrict B, const int switches, const int radix,
const int *restrict ports, const int *restrict adjacency, const int elements,
const bool enable_avx2, const int symmetries);
void ORP_Malloc(uint64_t **a, const size_t s, const bool enable_avx2);
void ORP_Free(uint64_t *a, const bool enable_avx2);
void ORP_declare_local_frontier(int swithces);
void ORP_free_local_frontier();
bool ORP_Check_profile();
int ORP_Get_kind();
int ORP_top_down_step(const int level, const int num_frontier, const int* restrict adjacency,
const int switches, const int radix, const int* restrict ports,
int* restrict frontier, int* restrict next, int* restrict distance);
int ORP_top_down_step_s(const int level, const int num_frontier, const int* restrict adjacency,
const int switches, const int radix, const int* restrict ports,
int* restrict frontier, int* restrict next, int* restrict distance, const int symmetries);
extern double ORP_Get_time();
/* Read the ORP_DISCONNECTED environment variable.
 * Returns false when the variable is unset or "0", true when it is "1";
 * any other value aborts via ERROR. */
static bool CHECK_DISCONNECTED()
{
  const char *val = getenv("ORP_DISCONNECTED");
  if(val == NULL)
    return false;

  int v = atoi(val);
  if(v == 1)
    return true;
  if(v == 0)
    return false;

  ERROR("Unknown ORP_DISCONNECTED value (%d)\n", v);
  return false; // dummy (not reached if ERROR terminates)
}
// ASPL by the matrix method (no symmetry): repeated Boolean matrix products
// over bit-packed rows propagate reachability one hop per iteration.
// _bitmap marks switch pairs whose distance is already resolved so each
// pair is counted exactly once.  Outputs are host-to-host values.
static void aspl_mat(const int* restrict h_degree, const int* restrict s_degree, const int* restrict adjacency,
int *diameter, long *sum, double *ASPL)
{
#pragma omp parallel for
for(int i=0;i<_switches*_switches;i++)
_bitmap[i] = NOT_VISITED;
#pragma omp parallel for
for(int i=0;i<_switches*_elements;i++)
_A[i] = _B[i] = 0;
// stop_k = number of unordered switch pairs; k counts pairs resolved so far,
// allowing early exit once every pair's distance is known
long k = 0, stop_k = ((long)_switches*_switches-_switches)/2, local_sum = 0;
int local_diameter = 0;
// seed: each switch reaches itself (bit i set in row i)
#pragma omp parallel for
for(int i=0;i<_switches;i++){
unsigned int offset = i*_elements+i/UINT64_BITS;
_A[offset] = _B[offset] = (0x1ULL << (i%UINT64_BITS));
}
*diameter = 1;
// level = host-to-host path length for pairs resolved in the current round
// (switch-to-switch distance + 2 for the host<->switch hops at either end)
long level = 2;
for(int kk=0;kk<_switches;kk++){
ORP_Matmul(_A, _B, _switches, _radix, s_degree, adjacency, _elements, _enable_avx2);
level++;
#pragma omp parallel for reduction(+:k,local_sum) reduction(max:local_diameter)
for(int i=0;i<_switches;i++){
for(int j=i+1;j<_switches;j++){
int ii = i*_switches+j;
if(_bitmap[ii] == NOT_VISITED){
if(_B[i*_elements+(j/UINT64_BITS)] & (0x1ULL<<(j%UINT64_BITS))){
_bitmap[ii] = VISITED;
k++;
// only pairs of switches that both host endpoints contribute
if(h_degree[i] != 0 && h_degree[j] != 0){
local_diameter = MAX(local_diameter, level-2);
local_sum += level * h_degree[i] * h_degree[j];
}
}
else if(_enable_disconnected && (h_degree[i] == 0 || h_degree[j] == 0)){
// host-free switches may legitimately stay unreachable; mark the
// pair as resolved so the early-exit test can still fire
_bitmap[ii] = VISITED;
k++;
}
}
}
}
*diameter = local_diameter;
if(k == stop_k) break;
// swap A <-> B
uint64_t* tmp = _A;
_A = _B;
_B = tmp;
// never converged within _switches products: graph is disconnected
if(kk == _switches-1)
(*diameter) = _switches;
}
// add same-switch host pairs (path host-switch-host; h*(h-1) ordered pairs)
#pragma omp parallel for reduction(+:local_sum)
for(int i=0;i<_switches;i++)
local_sum += (long)h_degree[i] * (h_degree[i] - 1);
*ASPL = local_sum / (double)(((long)_hosts*(_hosts-1))/2);
*sum = local_sum;
// convert switch diameter to host-to-host diameter
*diameter += 2;
}
// ASPL by the matrix method exploiting rotational symmetry: only the
// _based_switches (= _switches/_symmetries) representative sources are
// seeded, and each resolved pair is weighted by its orbit multiplicity
// (ss below).  _bitmap is _switches x _based_switches here.
static void aspl_mat_s(const int* restrict h_degree, const int* restrict s_degree, const int* restrict adjacency,
int *diameter, long *sum, double *ASPL)
{
#pragma omp parallel for
for(int i=0;i<_switches*_based_switches;i++)
_bitmap[i] = NOT_VISITED;
#pragma omp parallel for
for(int i=0;i<_switches*_elements;i++)
_A[i] = _B[i] = 0;
// stop_k counts the (i,j) cells actually scanned in the loop below, so the
// early-exit test fires once every tracked pair is resolved
long k = 0, stop_k = ((long)_switches-_based_switches)*_based_switches+(_based_switches * (_based_switches-1)/2), local_sum = 0;
int local_diameter = 0;
// seed only the representative switches of each symmetry orbit
#pragma omp parallel for
for(int i=0;i<_based_switches;i++){
unsigned int offset = i*_elements+i/UINT64_BITS;
_A[offset] = _B[offset] = (0x1ULL << (i%UINT64_BITS));
}
*diameter = 1;
// level = host-to-host path length for pairs resolved this round
long level = 2;
for(int kk=0;kk<_switches;kk++){
ORP_Matmul_s(_A, _B, _switches, _radix, s_degree, adjacency, _elements, _enable_avx2, _symmetries);
level++;
#pragma omp parallel for reduction(+:k,local_sum) reduction(max:local_diameter)
for(int i=0;i<_switches;i++){
// ib = representative of i's orbit; degree arrays are indexed by orbit
int ib = i%_based_switches;
int end = (i < _based_switches)? i : _based_switches;
// multiplicity: pairs within the base block are later halved globally,
// hence the doubled weight there
int ss = (i < _based_switches)? _symmetries * 2 : _symmetries;
for(int j=0;j<end;j++){
int ii = i*_based_switches+j;
if(_bitmap[ii] == NOT_VISITED){
if(_B[i*_elements+(j/UINT64_BITS)] & (0x1ULL<<(j%UINT64_BITS))){
_bitmap[ii] = VISITED;
k++;
if(h_degree[ib] != 0 && h_degree[j] != 0){
local_diameter = MAX(local_diameter, level-2);
local_sum += level * h_degree[ib] * h_degree[j] * ss;
}
}
else if(_enable_disconnected && (h_degree[ib] == 0 || h_degree[j] == 0)){
// host-free switches may stay unreachable; count as resolved
_bitmap[ii] = VISITED;
k++;
}
}
}
}
*diameter = local_diameter;
if(k == stop_k) break;
// swap A <-> B
uint64_t* tmp = _A;
_A = _B;
_B = tmp;
// never converged: graph is disconnected
if(kk == _switches-1)
(*diameter) = _switches;
}
// undo the double counting introduced by ss above
local_sum = local_sum / 2;
// add same-switch host pairs, replicated across all symmetric copies
#pragma omp parallel for reduction(+:local_sum)
for(int i=0;i<_based_switches;i++)
local_sum += (long)h_degree[i] * (h_degree[i] - 1) * _symmetries;
*ASPL = local_sum / (double)(((long)_hosts*(_hosts-1))/2);
*sum = local_sum;
// convert switch diameter to host-to-host diameter
*diameter += 2;
}
/* Initialize ASPL working storage for a topology with rotational symmetry.
 * hosts and switches must both be divisible by symmetries (aborts otherwise).
 * Selects the matrix or BFS method via ORP_Get_kind() and allocates the
 * buffers that method needs; release them with ORP_Finalize_aspl(). */
void ORP_Init_aspl_s(const int hosts, const int switches, const int radix, const int symmetries)
{
  if(hosts % symmetries != 0)
    ERROR("hosts(%d) must be divisible by symmetries(%d)\n", hosts, symmetries);
  else if(switches % symmetries != 0)
    ERROR("switches(%d) must be divisible by symmetries(%d)\n", switches, symmetries);

  _enable_disconnected = CHECK_DISCONNECTED();
  _kind = ORP_Get_kind(switches, symmetries);
  _based_switches = switches/symmetries;
  // 64-bit words per bit-matrix row (one bit per representative switch)
  _elements = (_based_switches+(UINT64_BITS-1))/UINT64_BITS;
#ifdef __AVX2__
  if(_elements >= 4){ // For performance
    _enable_avx2 = true;
    _elements = ((_elements+3)/4)*4; // _elements must be multiple of 4
  }
#endif
  if(_kind == ASPL_MATRIX){
    ORP_Malloc(&_A, switches*_elements*sizeof(uint64_t), _enable_avx2); // uint64_t A[switches][_elements];
    ORP_Malloc(&_B, switches*_elements*sizeof(uint64_t), _enable_avx2); // uint64_t B[switches][_elements];
    _bitmap = malloc(sizeof(char) * switches * _based_switches); // char _bitmap[switches][_based_switches];
    // BUG FIX: allocation results were previously used unchecked
    if(_A == NULL || _B == NULL || _bitmap == NULL)
      ERROR("Out of memory\n");
  }
  else{ // _kind == ASPL_BFS
    _frontier = malloc(sizeof(int) * switches);
    _distance = malloc(sizeof(int) * switches);
    _next     = malloc(sizeof(int) * switches);
    // BUG FIX: allocation results were previously used unchecked
    if(_frontier == NULL || _distance == NULL || _next == NULL)
      ERROR("Out of memory\n");
#ifdef _OPENMP
    ORP_declare_local_frontier(switches);
#endif
  }
  _hosts = hosts;
  _switches = switches;
  _radix = radix;
  _symmetries = symmetries;
  _is_profile = ORP_Check_profile();
  _elapsed_time = 0;
  _times = 0;
}
// Initialize ASPL working storage for the non-symmetric case:
// equivalent to ORP_Init_aspl_s() with symmetries == 1.
void ORP_Init_aspl(const int hosts, const int switches, const int radix)
{
ORP_Init_aspl_s(hosts, switches, radix, 1);
}
// ASPL by repeated BFS (no symmetry): one breadth-first search per switch
// that hosts at least one endpoint.  _distance[i] holds the BFS level of
// switch i (source at level 1); host-to-host lengths add 2 at the end.
static void aspl_bfs(const int* restrict h_degree, const int* restrict s_degree, const int* restrict adjacency,
int* diameter, long *sum, double* ASPL)
{
*diameter = 0;
*sum = 0;
for(int s=0;s<_switches;s++){
// NOTE(review): flag is re-initialized to true on every outer iteration,
// so the connectivity check below runs for every source and the
// flag=false assignment has no lasting effect — presumably intended to
// run the check only once; confirm against upstream history.
bool flag = true;
// switches with no hosts attached are never BFS sources
if(h_degree[s] == 0) continue;
int num_frontier = 1, level = 1;
for(int i=0;i<_switches;i++)
_distance[i] = NOT_USED;
_frontier[0] = s;
_distance[s] = level;
// standard top-down BFS with double-buffered frontier
while(1){
num_frontier = ORP_top_down_step(level++, num_frontier, adjacency, _switches, _radix, s_degree,
_frontier, _next, _distance);
if(num_frontier == 0) break;
int *tmp = _frontier;
_frontier = _next;
_next = tmp;
}
if(flag){
flag = false;
if(_enable_disconnected){
// disconnection is tolerated only for host-free switches
for(int i=0;i<_switches;i++){
if(_distance[i] == NOT_USED && h_degree[i] != 0){
*diameter = INT_MAX;
return;
}
}
}
else{
// any unreachable switch means the topology is disconnected
for(int i=0;i<_switches;i++){
if(_distance[i] == NOT_USED){
*diameter = INT_MAX;
return;
}
}
}
}
// accumulate each unordered pair once (i > s); +2 converts the BFS
// level difference to a host-to-host path length
for(int i=s+1;i<_switches;i++){
if(h_degree[i] != 0){
*sum += (long)(_distance[i] + 2) * h_degree[i] * h_degree[s];
*diameter = MAX(*diameter, _distance[i]);
}
}
}
// same-switch host pairs (path host-switch-host)
for(int s=0;s<_switches;s++)
*sum += (long)h_degree[s] * (h_degree[s] - 1);
*ASPL = *sum / (double)(((long)_hosts*(_hosts-1))/2);
*diameter += 2;
}
// ASPL by repeated BFS exploiting rotational symmetry: BFS is run only from
// the _based_switches representative sources, and pair sums are scaled by
// _symmetries at the end.  h_degree is indexed by orbit (i % _based_switches).
static void aspl_bfs_s(const int* restrict h_degree, const int* restrict s_degree, const int* restrict adjacency,
int* diameter, long *sum, double* ASPL)
{
*diameter = 0;
*sum = 0;
for(int s=0;s<_based_switches;s++){
// NOTE(review): flag is re-initialized each iteration, so the
// connectivity check runs for every source and flag=false is inert —
// same pattern as aspl_bfs; confirm intent upstream.
bool flag = true;
if(h_degree[s] == 0) continue;
int num_frontier = 1, level = 1;
for(int i=0;i<_switches;i++)
_distance[i] = NOT_USED;
_frontier[0] = s;
_distance[s] = level;
// symmetry-aware top-down BFS with double-buffered frontier
while(1){
num_frontier = ORP_top_down_step_s(level++, num_frontier, adjacency, _switches, _radix, s_degree,
_frontier, _next, _distance, _symmetries);
if(num_frontier == 0) break;
int *tmp = _frontier;
_frontier = _next;
_next = tmp;
}
for(int i=s+1;i<_switches;i++)
if(h_degree[i%_based_switches] != 0)
*diameter = MAX(*diameter, _distance[i]);
if(flag){
flag = false;
if(_enable_disconnected){
// disconnection is tolerated only for host-free switches
for(int i=0;i<_switches;i++){
if(_distance[i] == NOT_USED && h_degree[i%_based_switches] != 0){
*diameter = INT_MAX;
return;
}
}
}
else{
for(int i=0;i<_switches;i++){
if(_distance[i] == NOT_USED){
*diameter = INT_MAX;
return;
}
}
}
}
// accumulate ordered pairs (i != s); +2 converts the BFS level to a
// host-to-host path length
for(int i=0;i<_switches;i++)
if(i!=s)
*sum += (long)(_distance[i] + 2) * h_degree[i%_based_switches] * h_degree[s];
}
// scale by orbit count and halve the ordered-pair double counting
*sum = *sum * _symmetries / 2;
// same-switch host pairs, replicated across symmetric copies
for(int s=0;s<_based_switches;s++)
*sum += (long)h_degree[s] * (h_degree[s] - 1) * _symmetries;
*ASPL = *sum / (double)(((long)_hosts*(_hosts-1))/2);
*diameter += 2;
}
/* Release the buffers allocated by ORP_Init_aspl[_s]() and, when profiling
 * is enabled, emit the accumulated timing report.
 * IMPROVEMENT: freed pointers are reset to NULL so a stray second call to
 * ORP_Finalize_aspl(), or a Finalize/Init cycle, cannot double-free. */
void ORP_Finalize_aspl()
{
  if(_kind == ASPL_MATRIX){
    ORP_Free(_A, _enable_avx2);
    ORP_Free(_B, _enable_avx2);
    free(_bitmap);
    _A = _B = NULL;
    _bitmap = NULL;
  }
  else{ // _kind == ASPL_BFS
    free(_frontier);
    free(_distance);
    free(_next);
    _frontier = _distance = _next = NULL;
#ifdef _OPENMP
    ORP_free_local_frontier();
#endif
  }
  if(_is_profile){
#ifdef _OPENMP
    ORP_Profile("THREADS", _kind, _switches, _symmetries, _elapsed_time, _times);
#else
    ORP_Profile("SERIAL", _kind, _switches, _symmetries, _elapsed_time, _times);
#endif
  }
}
// Compute diameter, total pairwise distance (sum), and ASPL for the given
// topology, dispatching on the method (_kind) and symmetry selected at init
// time.  Also accumulates wall-clock time and call count for profiling.
void ORP_Set_aspl(const int* restrict h_degree, const int* restrict s_degree, const int* restrict adjacency,
int *diameter, long *sum, double *ASPL)
{
double t = ORP_Get_time();
if(_symmetries == 1){
if(_kind == ASPL_MATRIX)
aspl_mat(h_degree, s_degree, adjacency, diameter, sum, ASPL);
else // _kind == ASPL_BFS
aspl_bfs(h_degree, s_degree, adjacency, diameter, sum, ASPL);
}
else{
if(_kind == ASPL_MATRIX)
aspl_mat_s(h_degree, s_degree, adjacency, diameter, sum, ASPL);
else // _kind == ASPL_BFS
aspl_bfs_s(h_degree, s_degree, adjacency, diameter, sum, ASPL);
}
_elapsed_time += ORP_Get_time() - t;
// a diameter of _switches+2 or more is the disconnected-graph sentinel set
// by the per-method routines; report saturated values to the caller
if(*diameter >= _switches+2){
*diameter = INT_MAX;
*sum = LONG_MAX;
*ASPL = DBL_MAX;
}
_times++;
}
|
unpk_0.c | #include <stdio.h>
#include <stddef.h>
#include <limits.h>
#include "wgrib2.h"
#ifdef USE_OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() 1
#endif
/* 1996 wesley ebisuzaki
*
* Unpack BDS section
*
* input: *bits, pointer to packed integer data
* *bitmap, pointer to bitmap (undefined data), NULL if none
* n_bits, number of bits per packed integer
* n, number of data points (includes undefined data)
* ref, scale: flt[] = ref + scale*packed_int
* output: *flt, pointer to output array
* undefined values filled with UNDEFINED
*
* note: code assumes an integer >= 32 bits
*
* 7/98 v1.2.1 fix bug for bitmaps and nbit >= 25 found by Larry Brasfield
* 2/01 v1.2.2 changed jj from long int to double
* 3/02 v1.2.3 added unpacking extensions for spectral data
* Luis Kornblueh, MPIfM
* 7/06 v.1.2.4 fixed some bug complex packed data was not set to undefined
* 10/15 v.1.2.5 changed n and i to unsigned
* 3/16 v.1.2.6 OpenMP
* 6/16 v.1.2.7 faster OpenMP and optimization
*/
static unsigned int mask[] = {0,1,3,7,15,31,63,127,255};
static double shift[9] = {1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0};
// Unpack a GRIB BDS bit stream into floats: flt[i] = ref + scale * packed,
// with ref/scale pre-multiplied by dec_scale.  bitmap0 (optional) marks
// defined points; undefined points are set to UNDEFINED.  n_bits <= 25 takes
// an OpenMP fast path; larger widths use the original serial decoder.
void unpk_0(float *flt, unsigned char *bits0, unsigned char *bitmap0,
int n_bits, unsigned int n, double ref, double scale, double dec_scale) {
unsigned char *bits, *bitmap;
int c_bits, j_bits, nthreads;
unsigned int map_mask, bbits, i, j, k, n_missing, ndef, di;
double jj;
// fold the decimal scale factor into ref and scale once, up front
ref = ref * dec_scale;
scale = scale * dec_scale;
bits = bits0;
bitmap = bitmap0;
bbits = 0;
/* assume integer has 32+ bits */
/* optimized code for n_bits <= 25bits */
if (n_bits <= 25) {
n_missing = bitmap ? missing_points(bitmap0, n) : 0;
ndef = n - n_missing;
// 1-cpu: rd_bitstream_flt(bits0, 0, flt+n_missing, n_bits, ndef);
// 1-cpu: for (j = 0; j < ndef; j++) flt[j+n_missing] = ref + scale*flt[j+n_missing];
#pragma omp parallel private(i,j,k)
{
#pragma omp single
{
nthreads = omp_get_num_threads();
// chunk size per thread, rounded UP to a multiple of 8 (the |7 ^7 trick
// is (di+7) & ~7) so every chunk starts on a whole-byte boundary of the
// packed stream — required by the (i/8)*n_bits byte offset below
di = (ndef + nthreads - 1) / nthreads;
di = ((di + 7) | 7) ^ 7;
}
// decode each chunk into the tail of flt, then apply ref + scale in place
#pragma omp for
for (i = 0; i < ndef; i += di) {
k = ndef - i;
if (k > di) k = di;
rd_bitstream_flt(bits0 + (i/8)*n_bits, 0, flt+n_missing+i, n_bits, k);
for (j = i+n_missing; j < i+k+n_missing; j++) {
flt[j] = ref + scale*flt[j];
}
}
}
/*
#pragma omp parallel for private(i,j,k)
for (i = 0; i < ndef; i += CACHE_LINE_BITS) {
k = ndef - i;
if (k > CACHE_LINE_BITS) k = CACHE_LINE_BITS;
rd_bitstream_flt(bits0 + (i/8)*n_bits, 0, flt+n_missing+i, n_bits, k);
for (j = i+n_missing; j < i+k+n_missing; j++) {
flt[j] = ref + scale*flt[j];
}
}
*/
// expand the compacted values (stored at flt[n_missing..n-1]) back to
// their bitmap positions; safe in place because the read index j never
// falls behind the write index i
if (n_missing != 0) {
j = n_missing;
for (i = 0; i < n; i++) {
/* check bitmap */
if ((i & 7) == 0) bbits = *bitmap++;
if (bbits & 128) {
flt[i] = flt[j++];
}
else {
flt[i] = UNDEFINED;
}
bbits = bbits << 1;
}
}
}
else {
/* older unoptimized code, not often used */
c_bits = 8; // bits still unread in the current byte *bits
map_mask = 128;
while (n-- > 0) {
if (bitmap) {
j = (*bitmap & map_mask);
if ((map_mask >>= 1) == 0) {
map_mask = 128;
bitmap++;
}
if (j == 0) {
*flt++ = UNDEFINED;
continue;
}
}
// accumulate n_bits of the packed integer into jj, a full byte at a time
jj = 0.0;
j_bits = n_bits;
while (c_bits <= j_bits) {
if (c_bits == 8) {
jj = jj * 256.0 + (double) (*bits++);
j_bits -= 8;
}
else {
jj = (jj * shift[c_bits]) + (double) (*bits & mask[c_bits]);
bits++;
j_bits -= c_bits;
c_bits = 8;
}
}
// final partial byte, taken from the high bits of *bits
if (j_bits) {
c_bits -= j_bits;
jj = (jj * shift[j_bits]) + (double) ((*bits >> c_bits) & mask[j_bits]);
}
*flt++ = ref + scale*jj;
}
}
return;
}
|
stencil_hybrid.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "malloc2D.h"
#include "timer.h"
#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)
// Hybrid OpenMP 5-point stencil benchmark: one parallel region spans all
// iterations; thread 0 owns timing and the pointer swap.  Reports init,
// cache-flush, stencil, and total times.
int main(int argc, char *argv[])
{
#pragma omp parallel
   if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads());

   struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total;
   // BUG FIX: these accumulators are only ever updated with +=, so they must
   // start at 0.0 — reading an uninitialized automatic variable is undefined
   // behavior and produced garbage timings.
   double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0;

   int imax = 2002, jmax = 2002;

   double **xtmp;
   double **x = malloc2D(jmax, imax);
   double **xnew = malloc2D(jmax, imax);
   // scratch buffer ~4x the array footprint, rewritten each iteration to
   // evict x/xnew from cache between timed phases
   int *flush = (int *)malloc(jmax*imax*sizeof(int)*4);

   cpu_timer_start(&tstart_total);

#pragma omp parallel
   {
      int thread_id = omp_get_thread_num();
      if (thread_id == 0) cpu_timer_start(&tstart_init);

      // initialize the mesh: xnew cleared, x at background value 5.0
#pragma omp for
      for (int j = 0; j < jmax; j++){
#ifdef OMP_SIMD
#pragma omp simd
#endif
         for (int i = 0; i < imax; i++){
            xnew[j][i] = 0.0;
            x[j][i] = 5.0;
         }
      }

      // hot-spot patch near the center of the mesh
#pragma omp for
      for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
         for (int i = imax/2 - 5; i < imax/2 -1; i++){
            x[j][i] = 400.0;
         }
      }
      if (thread_id == 0) init_time += cpu_timer_stop(tstart_init);

      for (int iter = 0; iter < 10000; iter++){
         if (thread_id ==0) cpu_timer_start(&tstart_flush);
         // BUG FIX: start at 0 (element 0 was skipped) and store the int 1
         // rather than the double literal 1.0 into the int array
#ifdef OMP_SIMD
#pragma omp for simd nowait
#else
#pragma omp for nowait
#endif
         for (int l = 0; l < jmax*imax*4; l++){
            flush[l] = 1;
         }
         if (thread_id == 0){
            flush_time += cpu_timer_stop(tstart_flush);
            cpu_timer_start(&tstart_stencil);
         }

         // 5-point stencil average over the interior
#pragma omp for
         for (int j = 1; j < jmax-1; j++){
#ifdef OMP_SIMD
#pragma omp simd
#endif
            for (int i = 1; i < imax-1; i++){
               xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
            }
         }
         if (thread_id == 0){
            stencil_time += cpu_timer_stop(tstart_stencil);
            SWAP_PTR(xnew, x, xtmp);
            if (iter%1000 == 0) printf("Iter %d\n",iter);
         }
         // all threads must observe the swapped pointers before reading x
         // in the next iteration
#pragma omp barrier
      }
   } // end omp parallel

   total_time += cpu_timer_stop(tstart_total);

   printf("Timing is init %f flush %f stencil %f total %f\n",
          init_time,flush_time,stencil_time,total_time);

   // NOTE(review): assumes malloc2D returns a single allocation freed via its
   // row-pointer array — confirm against malloc2D's implementation.
   free(x);
   free(xnew);
   free(flush);
   return 0;
}
|
pr79428-2.c | /* PR c/79428 */
/* { dg-options "-fopenmp" } */
void
foo ()
{
#pragma omp sections
#pragma omp section /* { dg-error "'#pragma omp section' may only be used in '#pragma omp sections' construct|not allowed" } */
// { dg-error "expected" "end" { target c } .-1 }
// { dg-error "-:expected" "end" { target c++ } .+1 }
|
GB_binop__bset_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint8)
// C=scalar+B GB (_bind1st__bset_uint8)
// C=scalar+B' GB (_bind1st_tran__bset_uint8)
// C=A+scalar GB (_bind2nd__bset_uint8)
// C=A'+scalar GB (_bind2nd_tran__bset_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITSET (x, y, uint8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT8 || GxB_NO_BSET_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no dense C += A+B kernel is generated for BSET_UINT8
// (the operator is not in the list below); "(none)" is the generator's
// placeholder name.  Kept for structural parity with other generated files.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the body is
// GB_dense_ewise3_noaccum_template.c specialized by the GB_* macros above.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// in which case the caller falls back to the generic method.
GrB_Info GB (_Cdense_ewise3_noaccum__bset_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with work
// partitioned by the precomputed B_ek_slicing task list.
GrB_Info GB (_Cdense_accumB__bset_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__bset_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block already returned; a
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no hard-coded C = A*D (column-scale) kernel is generated
// for BSET_UINT8; the generic case is used instead.  "(none)" is the
// generator's placeholder name.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no hard-coded C = D*B (row-scale) kernel is generated for
// BSET_UINT8; the generic case is used instead.
// FIX: "(node)" was a typo for the generator's "(none)" placeholder name,
// matching the disabled column-scale stub above (inert under #if 0).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B for the BSET_UINT8 operator.  All work
// is done by GB_add_template.c, driven by the GB_* macros defined above.
GrB_Info GB (_AaddB__bset_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general case; the body is
// GB_emult_01_meta.c specialized by the GB_* macros above.
GrB_Info GB (_AemultB_01__bset_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BITSET is not commutative and has no flipped variant,
// so GB_BINOP_FLIP selects between fmult(x,y) and fmult(y,x) at runtime.
GrB_Info GB (_AemultB_02__bset_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; work is sliced over M via M_ek_slicing.
GrB_Info GB (_AemultB_03__bset_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B;
// the body is GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bset_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply GB_BITSET with the scalar bound as the first
// argument: Cx[p] = bitset(x, Bx[p]).  Cx and Bx may be aliased; entries
// absent from the bitmap Bb (GBB test) are skipped.
GrB_Info GB (_bind1st__bset_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = GB_BITSET (x, bij, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply GB_BITSET with the scalar bound as the second
// argument: Cx[p] = bitset(Ax[p], y).  Cx and Ax may be aliased; entries
// absent from the bitmap Ab (GBB test) are skipped.
GrB_Info GB (_bind2nd__bset_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = GB_BITSET (aij, y, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, uint8_t, 8) ; \
}
// C = op(x, A'): transpose A while applying GB_BITSET with the scalar bound
// as the first argument, using the GB_CAST_OP macro defined just above and
// the shared GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__bset_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generator artifact; it is
// preprocessor text, not executed code, despite appearing after return)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// y is bound as the 2nd operand of GB_BITSET; aij supplies the value.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = Ax [pA] ;  \
    Cx [pC] = GB_BITSET (aij, y, uint8_t, 8) ;  \
}

// C = bitset (A', y): the transpose itself is performed by the included
// GB_unop_transpose.c template, which expands GB_CAST_OP per entry.
GrB_Info GB (_bind2nd_tran__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
thread_scale_tlp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <assert.h>
#include "zmtest_abslock.h"
#define TEST_NITER 100000
char cache_lines[640] = {0};
int indices [] = {3,6,1,7,0,2,9,4,8,5};
/* Measure lock throughput with mixed-priority contenders: even thread ids
 * acquire with high priority (zm_abslock_acquire), odd ids with low priority
 * (zm_abslock_acquire_l).  Prints acquisitions/second per class for every
 * thread count from 1 to omp_get_max_threads(). */
static void test_thruput()
{
    unsigned nthreads = omp_get_max_threads();

    zm_abslock_t lock;
    zm_abslock_init(&lock);

    printf("#Thread \t HP:Thruput[acqs/s] \t LP Thruput[acqs/s]\n");
    /* unsigned index avoids the signed/unsigned comparison with 'nthreads' */
    for (unsigned cur_nthreads = 1; cur_nthreads <= nthreads; cur_nthreads++) {
        double start_times[cur_nthreads];
        double stop_times[cur_nthreads];
        #pragma omp parallel num_threads(cur_nthreads)
        {
            int tid = omp_get_thread_num();
            start_times[tid] = omp_get_wtime();
            for (int iter = 0; iter < TEST_NITER; iter++) {
                if (tid % 2 == 0)
                    zm_abslock_acquire(&lock);    /* high priority */
                else
                    zm_abslock_acquire_l(&lock);  /* low priority */
                /* Computation inside the critical section */
                for (int i = 0; i < 10; i++)
                    cache_lines[indices[i]] += cache_lines[indices[9 - i]];
                zm_abslock_release(&lock);
            }
            stop_times[tid] = omp_get_wtime();
        } /* End of omp parallel */

        /* total per-class wall time across the threads of each class */
        double htimes = 0.0, ltimes = 0.0;
        for (unsigned i = 0; i < cur_nthreads; i++) {
            if (i % 2 == 0) htimes += (stop_times[i] - start_times[i]);
            else            ltimes += (stop_times[i] - start_times[i]);
        }
        /* even tids are HP: ceil(n/2) of them; the remainder are LP */
        int hthreads = (int)((cur_nthreads + 1) / 2);
        int lthreads = (int)cur_nthreads - hthreads;
        assert(hthreads + lthreads == (int)cur_nthreads);
        if (lthreads > 0)
            printf("%u \t %lf \t %lf\n", cur_nthreads,
                   ((double)TEST_NITER * hthreads) / htimes,
                   ((double)TEST_NITER * lthreads) / ltimes);
        else
            printf("%u \t %f \t %f\n", cur_nthreads,
                   ((double)TEST_NITER * hthreads) / htimes, -1.0);
    }
} /* end test_thruput() */
/* Entry point: run the throughput benchmark. */
int main(int argc, char **argv)
{
    (void)argc;  /* command-line arguments are unused */
    (void)argv;
    test_thruput();
    return 0;
} /* end main() */
|
GB_bitmap_assign_M_sub_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_M_sub_template: traverse M for GB_SUBASSIGN
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// M is sparse or hypersparse, not bitmap or full. C(I,J)<M>= ... is being
// computed (or !M), and all entries in M are traversed. For a given entry
// M(iM,jM) in the mask, at location pM. The entry C(iC,jC) is at location pC,
// where iC = I [iM] and jC = J [jM]. The matrix C is bitmap or full.
// C is bitmap/full. M is sparse/hyper, and can be jumbled.
{
    // M_ek_slicing packs three arrays of length M_ntasks back-to-back:
    // first entry index k, last k, and starting pM for each task.
    const int64_t *restrict kfirst_Mslice = M_ek_slicing ;
    const int64_t *restrict klast_Mslice  = M_ek_slicing + M_ntasks ;
    const int64_t *restrict pstart_Mslice = M_ek_slicing + M_ntasks * 2 ;
    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        // per-task count of new entries; folded into cnvals at the end
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over M (:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(:,k) for this task
            //------------------------------------------------------------------

            int64_t jM = GBH (Mh, k) ;
            int64_t pM_start, pM_end ;
            GB_get_pA (&pM_start, &pM_end, tid, k, kfirst,
                klast, pstart_Mslice, Mp, mvlen) ;

            //------------------------------------------------------------------
            // traverse over M(:,jM), the kth vector of M
            //------------------------------------------------------------------

            // for subassign, M has same size as C(I,J) and A.
            int64_t jC = GB_ijlist (J, jM, Jkind, Jcolon) ;
            // pC0 is the offset of column jC in the bitmap/full matrix C
            int64_t pC0 = jC * cvlen ;

            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                // mij = (bool) M(iM,jM), typecast from M's type
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    int64_t iM = Mi [pM] ;
                    int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                    int64_t pC = iC + pC0 ;
                    GB_MASK_WORK (pC) ;         // operate on Cx [pC]
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
|
network.h | // == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// network.h: The main artificial neural network graph for mojo
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <iostream> // cout
#include <fstream>
#include <sstream>
#include <map>
#include <vector>
#include "layer.h"
#include "solver.h"
#include "activation.h"
#include "cost.h"
// hack for VS2010 to handle c++11 for(:)
#if (_MSC_VER == 1600)
#ifndef __for__
#define __for__ for each
#define __in__ in
#endif
#else
#ifndef __for__
#define __for__ for
#define __in__ :
#endif
#endif
#if defined(MOJO_CV2) || defined(MOJO_CV3)
#ifdef MOJO_CV2
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#pragma comment(lib, "opencv_core249")
#pragma comment(lib, "opencv_highgui249")
#pragma comment(lib, "opencv_imgproc249")
#pragma comment(lib, "opencv_contrib249")
#else //#ifdef MOJO_CV3
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#pragma comment(lib, "opencv_world310")
#endif
#endif
namespace mojo {
#if defined(MOJO_CV2) || defined(MOJO_CV3)
// forward declare these for data augmentation
cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false);
mojo::matrix cv2matrix(cv::Mat &m);
mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f);
#endif
// sleep needed for threading
#ifdef _WIN32
#include <windows.h>
// sleep for the given number of milliseconds (Win32 Sleep)
void mojo_sleep(unsigned milliseconds) { Sleep(milliseconds); }
#else
#include <unistd.h>
// sleep for the given number of milliseconds (usleep takes microseconds)
void mojo_sleep(unsigned milliseconds) { usleep(milliseconds * 1000); }
#endif

#ifdef MOJO_PROFILE_LAYERS
#ifdef _WIN32
//* used for profiling layers
// QueryPerformanceCounter-based millisecond timer state
double PCFreq = 0.0;
__int64 CounterStart = 0;
// start (or restart) the profiling timer
void StartCounter()
{
	LARGE_INTEGER li;
	if (!QueryPerformanceFrequency(&li)) return;  // leaves PCFreq at 0 if unsupported
	PCFreq = double(li.QuadPart) / 1000.0;
	QueryPerformanceCounter(&li);
	CounterStart = li.QuadPart;
}
// milliseconds elapsed since the last StartCounter() call
double GetCounter()
{
	LARGE_INTEGER li;
	QueryPerformanceCounter(&li);
	return double(li.QuadPart - CounterStart) / PCFreq;
}
#else
// profiling is a no-op on non-Windows builds
void StartCounter(){}
double GetCounter(){return 0;}
#endif
#endif
//*/
// replace every occurrence of 'from' in 'str' with 'to', in place.
// advancing past each inserted 'to' is what makes replacing 'x' with 'yx' safe.
void replace_str(std::string& str, const std::string& from, const std::string& to) {
	if (from.empty())
		return;
	for (size_t pos = 0;
	     (pos = str.find(from, pos)) != std::string::npos;
	     pos += to.length())
	{
		str.replace(pos, from.length(), to);
	}
}
// returns Energy (euclidian distance / 2) and max index
// returns Energy (euclidian distance / 2) between 'out' and 'target',
// and optionally the argmax of 'out' via 'best_index' (ties keep the first).
float match_labels(const float *out, const float *target, const int size, int *best_index = NULL)
{
	float energy = 0;
	int arg_best = 0;
	for (int j = 0; j < size; j++)
	{
		const float d = out[j] - target[j];
		energy += d * d;
		if (out[j] > out[arg_best]) arg_best = j;
	}
	if (best_index) *best_index = arg_best;
	energy *= 0.5;
	return energy;
}
// returns index of highest value (argmax)
// returns index of highest value (argmax); ties keep the earliest index
int arg_max(const float *out, const int size)
{
	int best = 0;
	for (int j = 1; j < size; j++)
	{
		if (out[j] > out[best]) best = j;
	}
	return best;
}
//----------------------------------------------------------------------
// network
//  - class that holds all the layers and connection information
//  - runs forward prediction
class network
{
	int _size;                  // output size of the final layer
	int _thread_count;          // determines number of layer sets (copys of layers)
	int _internal_thread_count; // used for speeding up convolutions, etc..
	static const int MAIN_LAYER_SET = 0;  // index of the canonical layer set; others are thread copies

	// training related stuff
	int _batch_size;            // determines number of dW sets
	float _skip_energy_level;   // samples with energy below this can be skipped in training
	bool _smart_train;
	std::vector <float> _running_E;
	double _running_sum_E;
	cost_function *_cost_function;  // owned; deleted in the destructor
	solver *_solver;                // owned; deleted in the destructor

	// mini-batch slot states (values of batch_open[]) and the negative
	// status codes returned by get_next_open_batch()
	static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2;
	static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1;

#ifdef MOJO_OMP
	// guards batch_open when training from multiple threads
	omp_lock_t _lock_batch;
	void lock_batch() {omp_set_lock(&_lock_batch);}
	void unlock_batch() {omp_unset_lock(&_lock_batch);}
	void init_lock() {omp_init_lock(&_lock_batch);}
	void destroy_lock() {omp_destroy_lock(&_lock_batch);}
	int get_thread_num() {return omp_get_thread_num();}
#else
	// single-threaded no-op stubs when OpenMP is not enabled
	void lock_batch() {}
	void unlock_batch() {}
	void init_lock(){}
	void destroy_lock() {}
	int get_thread_num() {return 0;}
#endif

public:
	// training progress stuff
	int train_correct;
	int train_skipped;
	int stuck_counter;
	int train_updates;
	int train_samples;
	int epoch_count;
	int max_epochs;
	float best_estimated_accuracy;
	int best_accuracy_count;
	float old_estimated_accuracy;
	float estimated_accuracy;

	// data augmentation stuff
	int use_augmentation; // 0=off, 1=mojo, 2=opencv
	int augment_x, augment_y;
	int augment_h_flip, augment_v_flip;
	mojo::pad_type augment_pad;
	float augment_theta;
	float augment_scale;

	// here we have multiple sets of the layers to allow threading and batch processing
	// a separate layer set is needed for each independent thread
	std::vector< std::vector<base_layer *>> layer_sets;

	std::map<std::string, int> layer_map;  // name-to-index of layer for layer management
	std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected
	std::vector<matrix *> W; // these are the weights between/connecting layers

	// these sets are needed because we need copies for each item in mini-batch
	std::vector< std::vector<matrix>> dW_sets;     // only for training, will have _batch_size of these
	std::vector< std::vector<matrix>> dbias_sets;  // only for training, will have _batch_size of these
	std::vector< unsigned char > batch_open;       // only for training, will have _batch_size of these
network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1)
{
_internal_thread_count=1;
_size=0;
_solver = new_solver(opt_name);
_cost_function = NULL;
//std::vector<base_layer *> layer_set;
//layer_sets.push_back(layer_set);
layer_sets.resize(1);
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
_running_sum_E = 0.;
train_correct = 0;
train_samples = 0;
train_skipped = 0;
epoch_count = 0;
max_epochs = 1000;
train_updates = 0;
estimated_accuracy = 0;
old_estimated_accuracy = 0;
stuck_counter = 0;
best_estimated_accuracy=0;
best_accuracy_count=0;
use_augmentation=0;
augment_x = 0; augment_y = 0; augment_h_flip = 0; augment_v_flip = 0;
augment_pad =mojo::edge;
augment_theta=0; augment_scale=0;
init_lock();
#ifdef USE_AF
af::setDevice(0);
af::info();
#endif
}
~network()
{
clear();
if (_cost_function) delete _cost_function;
if(_solver) delete _solver;
destroy_lock();
}
// call clear if you want to load a different configuration/model
void clear()
{
for(int i=0; i<(int)layer_sets.size(); i++)
{
__for__(auto l __in__ layer_sets[i]) delete l;
layer_sets.clear();
}
layer_sets.clear();
__for__(auto w __in__ W) if(w) delete w;
W.clear();
layer_map.clear();
layer_graph.clear();
}
// output size of final layer;
int out_size() {return _size;}
// get input size
bool get_input_size(int *w, int *h, int *c)
{
if(layer_sets[MAIN_LAYER_SET].size()<1) return false;
*w=layer_sets[MAIN_LAYER_SET][0]->node.cols;*h=layer_sets[MAIN_LAYER_SET][0]->node.rows;*c=layer_sets[MAIN_LAYER_SET][0]->node.chans;
return true;
}
	// sets up number of layer copies to run over multiple threads
	// grows layer_sets to _thread_count entries (never shrinks) and
	// re-syncs bias data into the new copies
	void build_layer_sets()
	{
		int layer_cnt = (int)layer_sets.size();
		if (layer_cnt<_thread_count) layer_sets.resize(_thread_count);
		// ToDo: add shrink back / else if(layer_cnt>_thread_count)
		sync_layer_sets();
	}
	// number of layer sets available for concurrent callers
	inline int get_thread_count() {return _thread_count;}

	// must call this with max thread count before constructing layers
	// value <1 will result in thread count = # cores (including hyperthreaded)
	void enable_external_threads(int threads = -1)
	{
#ifdef MOJO_OMP
		if (threads < 1) threads = omp_get_num_procs();
		_thread_count = threads;
		// only widen the OMP pool if internal threading doesn't already need more
		if(_internal_thread_count<=_thread_count) omp_set_num_threads(_thread_count);
		omp_set_nested(1);  // allow nested parallelism with internal threads
#else
		if (threads < 1) _thread_count = 1;
		else _thread_count = threads;
		if (threads > 1) bail("must define MOJO_OMP to used threading");
#endif
		// make sure a layer set exists for each external thread
		build_layer_sets();
	}
	// enable intra-layer parallelism (convolutions etc.); value <1 means
	// one less than the core count, never below 1
	void enable_internal_threads(int threads = -1)
	{
#ifdef MOJO_OMP
		if (threads < 1) {threads = omp_get_num_procs(); threads = threads-1;} // one less than core count
		if(threads<1) _internal_thread_count=1;
		else _internal_thread_count=threads;
		omp_set_nested(1);  // internal threads run nested inside external ones
#else
		_internal_thread_count=1;
#endif
	}
	// when using threads, need to get bias data synched between all layer sets,
	// call this after bias update in main layer set to copy the bias to the other sets
	void sync_layer_sets()
	{
		// copy every bias value of every layer from set 0 into sets 1..N-1
		for(int i=1; i<(int)layer_sets.size();i++)
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
				for(int k=0; k<layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					(layer_sets[i])[j]->bias.x[k]=(layer_sets[MAIN_LAYER_SET])[j]->bias.x[k];
	}
	// used to add some noise to weights
	// perturbs every weight matrix with gaussian noise scaled by 1/size
	void heat_weights()
	{
		__for__(auto w __in__ W)
		{
			if (!w) continue;
			matrix noise(w->cols, w->rows, w->chans);
			noise.fill_random_normal(1.f/ noise.size());
			//noise *= *w;
			*w += noise;
		}
	}

	// subtract the mean from each weight matrix (re-centers weights at 0)
	void remove_means()
	{
		__for__(auto w __in__ W)
			if(w) w->remove_mean();
	}
	// used to push a layer back in the ORDERED list of layers
	// if connect_all() is used, then the order of the push_back is used to connect the layers
	// when forward or backward propogation, this order is used for the serialized order of calculations
	// Layer_name must be unique.
	// returns false if a layer with this name already exists
	bool push_back(const char *layer_name, const char *layer_config)
	{
		// NOTE: operator[] inserts a 0 entry for unknown names, which is what
		// makes this existence test work (index 0 is never assigned to a layer)
		if(layer_map[layer_name]) return false; //already exists
		base_layer *l=new_layer(layer_name, layer_config);
		// set map to index

		// make sure there is a 'set' to add layers to
		if(layer_sets.size()<1)
		{
			std::vector<base_layer *> layer_set;
			layer_sets.push_back(layer_set);
		}
		// make sure layer_sets are created
		build_layer_sets();
		layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size();
		layer_sets[MAIN_LAYER_SET].push_back(l);
		// upadate as potential last layer - so it sets the out size
		_size=l->fan_size();
		// add other copies needed for threading
		for(int i=1; i<(int)layer_sets.size();i++) layer_sets[i].push_back(new_layer(layer_name, layer_config));
		return true;
	}
// connect 2 layers together and initialize weights
// top and bottom concepts are reversed from literature
// my 'top' is the input of a forward() pass and the 'bottom' is the output
// perhaps 'top' traditionally comes from the brain model, but my 'top' comes
// from reading order (information flows top to bottom)
void connect(const char *layer_name_top, const char *layer_name_bottom)
{
size_t i_top=layer_map[layer_name_top];
size_t i_bottom=layer_map[layer_name_bottom];
base_layer *l_top= layer_sets[MAIN_LAYER_SET][i_top];
base_layer *l_bottom= layer_sets[MAIN_LAYER_SET][i_bottom];
int w_i=(int)W.size();
matrix *w = l_bottom->new_connection(*l_top, w_i);
W.push_back(w);
layer_graph.push_back(std::make_pair(layer_name_top,layer_name_bottom));
// need to build connections for other batches/threads
for(int i=1; i<(int)layer_sets.size(); i++)
{
l_top= layer_sets[i][i_top];
l_bottom= layer_sets[i][i_bottom];
delete l_bottom->new_connection(*l_top, w_i);
}
// we need to let solver prepare space for stateful information
if (_solver)
{
if (w)_solver->push_back(w->cols, w->rows, w->chans);
else _solver->push_back(1, 1, 1);
}
int fan_in=l_bottom->fan_size();
int fan_out=l_top->fan_size();
// ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet)
// after all connections, run through and do weights with correct fan count
// initialize weights - ToDo: separate and allow users to configure(?)
if (w && l_bottom->has_weights())
{
if (strcmp(l_bottom->p_act->name, "tanh") == 0)
{
// xavier : for tanh
float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
// float weight_base = (float)(std::sqrt(.25/( (double)fan_in)));
w->fill_random_uniform(weight_base);
}
else if ((strcmp(l_bottom->p_act->name, "sigmoid") == 0) || (strcmp(l_bottom->p_act->name, "sigmoid") == 0))
{
// xavier : for sigmoid
float weight_base = 4.f*(float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
w->fill_random_uniform(weight_base);
}
else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0)
|| (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0))
{
// he : for relu
float weight_base = (float)(std::sqrt(2. / (double)fan_in));
w->fill_random_normal(weight_base);
}
else
{
// lecun : orig
float weight_base = (float)(std::sqrt(1. / (double)fan_in));
w->fill_random_uniform(weight_base);
}
}
else if (w) w->fill(0);
}
// automatically connect all layers in the order they were provided
// easy way to go, but can't deal with branch/highway/resnet/inception types of architectures
void connect_all()
{
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size()-1; j++)
connect(layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j+1]->name.c_str());
}
int get_layer_index(const char *name)
{
for (int j = 0; j < (int)layer_sets[MAIN_LAYER_SET].size(); j++)
if (layer_sets[MAIN_LAYER_SET][j]->name.compare(name) == 0)
return j;
return -1;
}
	// get the list of layers used (but not connection information)
	// returns a human-readable summary: one line per layer, then the
	// connection pairs three per line
	std::string get_configuration()
	{
		std::string str;
		// print all layer configs
		for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) str+= "  "+ std::to_string((long long)j) +" : " +layer_sets[MAIN_LAYER_SET][j]->name +" : " + layer_sets[MAIN_LAYER_SET][j]->get_config_string();
		str += "\n";
		// print layer links
		if (layer_graph.size() <= 0) return str;

		for (int j = 0; j < (int)layer_graph.size(); j++)
		{
			// three "top-bottom" pairs per output line
			if (j % 3 == 0) str += "  ";
			if((j % 3 == 1)|| (j % 3 == 2)) str += ", ";
			str +=layer_graph[j].first + "-" + layer_graph[j].second;
			if (j % 3 == 2) str += "\n";
		}
		return str;
	}
// performs forward pass and returns class index
// do not delete or modify the returned pointer. it is a live pointer to the last layer in the network
// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
int predict_class(const float *in, int _thread_number = -1)
{
const float* out = forward(in, _thread_number);
// for(int i = 0; i < out_size(); i++)
// printf("%d: %f\n", i, out[i]);
return arg_max(out, out_size());
}
	//----------------------------------------------------------------------------------------------------------
	// F O R W A R D
	//
	// the main forward pass
	// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
	// train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..)
	// returns a live pointer into the last layer's node data - do not free or modify
	float* forward(const float *in, int _thread_number=-1, int _train=0)
	{
		if(_thread_number<0) _thread_number=get_thread_num();
		if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n");
		if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n");
		//std::cout << get_thread_num() << ",";

		// clear nodes to zero & find input layers
		std::vector<base_layer *> inputs;
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			if (dynamic_cast<input_layer*> (layer) != NULL)  inputs.push_back(layer);
			layer->set_threading(_internal_thread_count);
			layer->node.fill(0.f);
		}

		// first layer assumed input. copy input to it
		// 'in' is consumed consecutively by each input layer, in layer order
		const float *in_ptr = in;

		//base_layer * layer = layer_sets[_thread_number][0];
		//memcpy(layer->node.x, in, sizeof(float)*layer->node.size());
		__for__(auto layer __in__ inputs)
		{
			memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
			in_ptr += layer->node.size();
		}

		//for (int i = 0; i < layer->node.size(); i++)
		//	layer_sets[_thread_number][0]->node.x[i] = in[i];
		// for all layers
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			// add bias and activate these outputs (they should all be summed up from other branches at this point)
			//for(int j=0; j<layer->node.chans; j+=10) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)  std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
			layer->activate_nodes();
			//for(int j=0; j<layer->node.chans; j++) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)  std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";

			// send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition
			__for__ (auto &link __in__ layer->forward_linked_layers)
			{
				// instead of having a list of paired connections, just use the shape of W to determine connections
				// this is harder to read, but requires less look-ups
				// the 'link' variable is a std::pair created during the connect() call for the layers
				int connection_index = link.first;
				base_layer *p_bottom = link.second;
				// weight distribution of the signal to layers under it
#ifdef MOJO_PROFILE_LAYERS
				StartCounter();
#endif
				p_bottom->accumulate_signal(*layer, *W[connection_index], _train);
				//if (p_bottom->has_weights())
				//for(int j=0; j<layer->node.chans; j++)
				//int j=0;  for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)  std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
#ifdef MOJO_PROFILE_LAYERS
				std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n";
#endif
			}
		}
		// return pointer to float * result from last layer
		/*		std::cout << "out:";

		for (int i = 0; i < 10; i++)
		{
			std::cout << layer_sets[_thread_number][layer_sets[_thread_number].size() - 1]->node.x[i] <<",";
		}
		std::cout << "\n";
		*/
		return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x;
	}
	//----------------------------------------------------------------------------------------------------------
	// W R I T E
	//
	// write parameters to stream/file
	// note that this does not persist intermediate training information that could be needed to 'pickup where you left off'
	// format: "mojo01" header, layer count + configs, graph pairs, then a
	// binary flag followed by bias and weight data (binary or text)
	bool write(std::ofstream& ofs, bool binary = false, bool final = false)
	{
		// save layers
		int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
		//	int ignore_cnt = 0;
		//	for (int j = 0; j<(int)layer_sets[0].size(); j++)
		//		if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL)  ignore_cnt++;

		ofs<<"mojo01" << std::endl;
		ofs<<(int)(layer_cnt)<<std::endl;

		for(int j=0; j<(int)layer_sets[0].size(); j++)
			ofs << layer_sets[MAIN_LAYER_SET][j]->name << std::endl << layer_sets[MAIN_LAYER_SET][j]->get_config_string();
		//		if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL)

		// save graph
		ofs<<(int)layer_graph.size()<<std::endl;
		for(int j=0; j<(int)layer_graph.size(); j++)
			ofs<<layer_graph[j].first << std::endl << layer_graph[j].second << std::endl;

		if(binary)
		{
			ofs<<(int)1<<std::endl; // flags that this is binary data
			// binary version to save space if needed
			// save bias info
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
				if(layer_sets[MAIN_LAYER_SET][j]->use_bias())
					ofs.write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
			// save weights
			for (int j = 0; j < (int)W.size(); j++)
			{
				if (W[j])
					ofs.write((char*)W[j]->x, W[j]->size()*sizeof(float));
			}
		}
		else
		{
			ofs<<(int)0<<std::endl;  // 0 flag = text data follows
			// save bias info
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
			{
				if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
				{
					for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)  ofs << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << " ";
					ofs << std::endl;
				}
			}
			// save weights
			for(int j=0; j<(int)W.size(); j++)
			{
				if (W[j])
				{
					for (int i = 0; i < W[j]->size(); i++) ofs << W[j]->x[i] << " ";
					ofs << std::endl;
				}
			}
		}

		ofs.flush();
		return true;
	}
bool write(std::string &filename, bool binary = false, bool final = false) {
std::ofstream temp((const char *)filename.c_str(), std::ios::binary);
return write(temp, binary, final);
}//, std::ofstream::binary);
bool write(char *filename, bool binary = false, bool final = false)
{
std::string str= filename;
return write(str, binary, final);
}
	// read network from a file/stream
	// reads one line handling '\n', '\r\n', and '\r' endings uniformly;
	// sets eofbit when EOF is reached with no trailing newline
	std::string getcleanline(std::istream& ifs)
	{
		std::string s;

		// The characters in the stream are read one-by-one using a std::streambuf.
		// That is faster than reading them one-by-one using the std::istream.
		// Code that uses streambuf this way must be guarded by a sentry object.
		// The sentry object performs various tasks,
		// such as thread synchronization and updating the stream state.
		std::istream::sentry se(ifs, true);
		std::streambuf* sb = ifs.rdbuf();

		for (;;) {
			int c = sb->sbumpc();
			switch (c) {
			case '\n':
				return s;
			case '\r':
				// swallow the '\n' of a Windows "\r\n" pair
				if (sb->sgetc() == '\n') sb->sbumpc();
				return s;
			case EOF:
				// Also handle the case when the last line has no line ending
				if (s.empty()) ifs.setstate(std::ios::eofbit);
				return s;
			default:
				s += (char)c;
			}
		}
	}
	//----------------------------------------------------------------------------------------------------------
	// R E A D
	//
	// parse a model from a stream; supports three formats:
	//   "mojo01" header (versioned), "mojo:" (config-only, auto-connected),
	//   or a bare leading layer count (legacy)
	bool read(std::istream &ifs)
	{
		if(!ifs.good()) return false;
		std::string s;
		s = getcleanline(ifs);
		int layer_count;
		int version = 0;
		if (s.compare("mojo01")==0)
		{
			s = getcleanline(ifs);
			layer_count = atoi(s.c_str());
			version = 1;
		}
		else if (s.find("mojo:") == 0)
		{
			// config-only text format: one layer definition per line,
			// '#' lines are comments; no weights follow
			version = -1;
			int cnt = 1;

			while (!ifs.eof())
			{
				s = getcleanline(ifs);
				if (s.empty()) continue;
				if(s[0]=='#') continue;
				push_back(int2str(cnt).c_str(), s.c_str());
				cnt++;
			}
			connect_all();

			// copies batch=0 stuff to other batches
			sync_layer_sets();
			return true;
		}
		else
			layer_count = atoi(s.c_str());  // legacy: first line is the count

		// read layer def
		std::string layer_name;
		std::string layer_def;
		for (auto i=0; i<layer_count; i++)
		{
			layer_name = getcleanline(ifs);
			layer_def = getcleanline(ifs);
			push_back(layer_name.c_str(),layer_def.c_str());
		}

		// read graph
		int graph_count;
		ifs>>graph_count;
		getline(ifs,s); // get endline
		if (graph_count <= 0)
		{
			connect_all();
		}
		else
		{
			std::string layer_name1;
			std::string layer_name2;
			for (auto i=0; i<graph_count; i++)
			{
				layer_name1= getcleanline(ifs);
				layer_name2 = getcleanline(ifs);
				connect(layer_name1.c_str(),layer_name2.c_str());
			}
		}

		// flag written by write(): 1 = binary payload, 0 = text payload
		int binary;
		s=getcleanline(ifs); // get endline
		binary = atoi(s.c_str());

		// binary version to save space if needed
		if(binary==1)
		{
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
				if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
				{
					//int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
					//int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
					//for (int i = 0; i < layer_sets[MAIN_LAYER_SET][j]->bias.size(); i++)
					ifs.read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
				}
			for (int j = 0; j < (int)W.size(); j++)
			{
				if (W[j])
				{
					ifs.read((char*)W[j]->x, W[j]->size()*sizeof(float));
					// for(int i = 0; i < W[j]->size(); i++)
					//     printf("W[j]->x[%d] = %f\n", i, W[j]->x[i]);
				}
			}
		}
		else if(binary==0)// text version
		{
			// read bias
			for(int j=0; j<layer_count; j++)
			{
				if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
				{
					// int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
					// int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
					// for (int i = 0; i < c; i++)
					for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					{
						ifs >> layer_sets[MAIN_LAYER_SET][j]->bias.x[k];
						//std::cout << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << ",";
					}
					ifs.ignore();// getline(ifs, s); // get endline
				}
			}

			// read weights
			for (auto j=0; j<(int)W.size(); j++)
			{
				if (W[j])
				{
					for (int i = 0; i < W[j]->size(); i++) ifs >> W[j]->x[i];
					ifs.ignore(); //getline(ifs, s); // get endline
				}
			}
		}

		// copies batch=0 stuff to other batches
		sync_layer_sets();
		return true;
	}
bool read(std::string filename)
{
std::ifstream fs(filename.c_str(),std::ios::binary);
if (fs.is_open())
{
bool ret = read(fs);
fs.close();
return ret;
}
else return false;
}
bool read(const char *filename) { return read(std::string(filename)); }
#ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed
// ===========================================================================
// training part
// ===========================================================================
	// resets the state of all batches to 'free' state
	void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); }

	// sets up number of mini batches (storage for sets of weight deltas)
	// values < 1 are clamped to 1; all slots are reset to 'free'
	void set_mini_batch_size(int batch_cnt)
	{
		if (batch_cnt<1) batch_cnt = 1;
		_batch_size = batch_cnt;
		dW_sets.resize(_batch_size);
		dbias_sets.resize(_batch_size);
		batch_open.resize(_batch_size);
		reset_mini_batch();
	}

	int get_mini_batch_size() { return _batch_size; }
// return index of next free batch
// or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call)
// or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees)
int get_next_open_batch()
{
int reserved = 0;
int filled = 0;
for (int i = 0; i<batch_open.size(); i++)
{
if (batch_open[i] == BATCH_FREE) return i;
if (batch_open[i] == BATCH_RESERVED) reserved++;
if (batch_open[i] == BATCH_COMPLETE) filled++;
}
if (reserved>0) return BATCH_FILLED_IN_PROCESS; // all filled but wainting for reserves
if (filled == batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete
bail("threading error"); // should not get here unless threading problem
}
	//----------------------------------------------------------------------------------------------------------
	// s y n c   m i n i   b a t c h
	//
	// apply all weights to first set of dW, then apply to model weights
	// caller must ensure no batch slot is still BATCH_RESERVED
	void sync_mini_batch()
	{
		// need to ensure no batches in progress (reserved)
		int next = get_next_open_batch();
		if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock");

		int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();

		base_layer *layer;
		// sum contributions of all completed batch slots into slot 0
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				// if batch free, then make sure it is zero'd out because we will increment dW set [0]
				if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0);
				for (int b = 1; b< _batch_size; b++)
				{
					if (batch_open[b] == BATCH_COMPLETE) dW_sets[0][w_index] += dW_sets[b][w_index];
				}
			}
			// conv layers handle bias differently; skip the generic bias sum
			if (dynamic_cast<convolution_layer*> (layer) != NULL)  continue;
			// bias stuff... that needs to be fixed for conv layers perhaps
			if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0);
			for (int b = 1; b< _batch_size; b++)
			{
				if (batch_open[b] == BATCH_COMPLETE) dbias_sets[0][k] += dbias_sets[b][k];
			}
		}

		// update weights from the summed deltas in slot 0
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0)
					if(W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]);  // -- 10%
			}
			layer->update_bias(dbias_sets[0][k], _solver->learning_rate);
		}

		// prepare to start mini batch over
		reset_mini_batch();
		train_updates++;  // could have no updates .. so this is not exact

		// propagate the new biases into the thread-copy layer sets
		sync_layer_sets();
	}
// reserve_next.. is used to reserve a space in the minibatch for the existing training sample
// reserve a mini-batch slot for the current training sample.
// blocks (sleep + retry) while all slots are reserved by other threads;
// if all slots are complete, synchronizes the mini-batch first and retries.
// returns the reserved slot index; the trailing -3 return is unreachable
// because the loop only exits via one of the return statements.
int reserve_next_batch()
{
lock_batch();
int my_batch_index = -3;
while (my_batch_index < 0)
{
my_batch_index = get_next_open_batch();
if (my_batch_index >= 0) // valid index
{
batch_open[my_batch_index] = BATCH_RESERVED;
unlock_batch();
return my_batch_index;
}
else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete
{
// flush the finished mini-batch; afterwards slot 0 is free again
sync_mini_batch(); // resets _batch_index to 0
my_batch_index = get_next_open_batch();
batch_open[my_batch_index] = BATCH_RESERVED;
unlock_batch();
return my_batch_index;
}
// need to wait for ones in progress to finish
unlock_batch();
mojo_sleep(1);
lock_batch();
}
return -3;
}
// learning-rate accessors; bail() if no solver has been set
float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;}
void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;}
// reset the solver's internal state (e.g. accumulated momentum terms)
void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();}
// smart training skips samples the network already gets right with low error
bool get_smart_training() {return _smart_train;}
void set_smart_training(bool _use_train) { _smart_train = _use_train;}
// error-energy threshold below which a correct sample is skipped
float get_smart_train_level() { return _skip_energy_level; }
void set_smart_train_level(float _level) { _skip_energy_level = _level; }
// maximum epoch count, clamped to at least 1
void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; }
int get_epoch() { return epoch_count; }
// goal here is to update the weights W.
// use w_new = w_old - alpha dE/dw
// E = sum: 1/2*||y-target||^2
// note y = f(x*w)
// dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev
// similarly for cross entropy
// ===========================================================================
// training part
// ===========================================================================
// enable random training-data augmentation: translation (+/- pixels) and
// optional horizontal / vertical flips; rotation and scaling stay disabled
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 1;
	// this overload never rotates or scales
	augment_theta = 0;
	augment_scale = 0;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
	augment_pad = padding;
}
// enable full random augmentation: translation, flips, plus rotation
// (degrees) and scaling
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 2;
	augment_theta = rotation_deg;
	augment_scale = scale;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
	augment_pad = padding;
}
// call before starting training for current epoch
// call before starting training for current epoch.
// selects the loss function, clears the per-epoch counters and, when smart
// training sees repeated non-improvement, halves the learning rate
// (floored at 1e-6).
// NOTE(review): the previously assigned _cost_function is overwritten here
// without being released -- presumably new_cost_function allocates, so this
// may leak once per epoch; verify ownership
void start_epoch(std::string loss_function="mse")
{
_cost_function=new_cost_function(loss_function);
train_correct = 0;
train_skipped = 0;
train_updates = 0;
train_samples = 0;
if (epoch_count == 0) reset_solver();
// accuracy not improving .. slow learning
if(_smart_train && (best_accuracy_count > 4))
{
stuck_counter++;
set_learning_rate((0.5f)*get_learning_rate());
if (get_learning_rate() < 0.000001f)
{
// heat_weights();
set_learning_rate(0.000001f);
stuck_counter++;// end of the line.. so speed up end
}
best_accuracy_count = 0;
}
old_estimated_accuracy = estimated_accuracy;
estimated_accuracy = 0;
//_skip_energy_level = 0.05;
_running_sum_E = 0;
}
// time to stop?
// decide whether training should stop: the epoch budget is exhausted or
// learning has been stuck too long
// (2 stuck x 4 non-best accuracy checks = 8 times no improvement)
bool elvis_left_the_building()
{
	return (epoch_count > max_epochs) || (stuck_counter > 3);
}
// call after putting all training samples through this epoch
// call after putting all training samples through this epoch.
// flushes leftover mini-batches, updates the accuracy estimate and the
// smart-training bookkeeping; returns true when training should stop.
bool end_epoch()
{
	// run leftovers through mini-batch
	sync_mini_batch();
	epoch_count++;
	// estimate accuracy of validation run
	// fix: guard against division by zero (inf/NaN) when no samples were
	// trained this epoch
	if (train_samples > 0)
		estimated_accuracy = 100.f*train_correct / train_samples;
	else
		estimated_accuracy = 0;
	if (train_correct > best_estimated_accuracy)
	{
		best_estimated_accuracy = (float)train_correct;
		best_accuracy_count = 0;
		stuck_counter = 0;
	}
	else best_accuracy_count++;
	return elvis_left_the_building();
}
// if smart training was thinking about exiting, calling reset will make it think everything is OK
// if smart training was thinking about exiting, calling this makes it treat
// the current state as healthy again (clears all "stuck" bookkeeping)
void reset_smart_training()
{
	best_estimated_accuracy = 0;
	best_accuracy_count = 0;
	stuck_counter = 0;
}
//----------------------------------------------------------------------------------------------------------
// u p d a t e _ s m a r t _ t r a i n
//
// record one sample's result (error E, correct or not) and adapt the
// smart-train skip threshold from the error distribution of the last
// 1000 samples; thread safe via the OMP critical section when MOJO_OMP.
void update_smart_train(const float E, bool correct)
{
#ifdef MOJO_OMP
#pragma omp critical
#endif
{
train_samples++;
if (correct) train_correct++;
if (_smart_train)
{
_running_E.push_back(E);
_running_sum_E += E;
const int SMART_TRAIN_SAMPLE_SIZE = 1000;
int s = (int)_running_E.size();
if (s >= SMART_TRAIN_SAMPLE_SIZE)
{
// the mean error over the window scales which percentile of the
// sorted errors becomes the new skip cutoff
_running_sum_E /= (double)s;
std::sort(_running_E.begin(), _running_E.end());
float top_fraction = (float)_running_sum_E*10.f; //10.
const float max_fraction = 0.75f;
const float min_fraction = 0.075f;// 0.03f;
if (top_fraction > max_fraction) top_fraction = max_fraction;
if (top_fraction < min_fraction) top_fraction = min_fraction;
int index = s - 1 - (int)(top_fraction*(s - 1));
if (_running_E[index] > 0) _skip_energy_level = _running_E[index];
_running_sum_E = 0;
_running_E.clear();
}
}
// count samples that would be skipped at the current threshold
if (E > 0 && E < _skip_energy_level)
{
//std::cout << "E=" << E;
train_skipped++;
}
} // omp critical
}
// finish back propogation through the hidden layers
// finish back propagation through the hidden layers for one sample:
// 1) multiply each layer's accumulated delta by the activation gradient and
//    distribute it to the layers feeding it,
// 2) accumulate dW/dbias into this sample's mini-batch slot,
// 3) mark the slot complete and, if the whole mini-batch is now complete,
//    apply the weight update under the batch lock.
void backward_hidden(const int my_batch_index, const int thread_number)
{
const int layer_cnt = (int)layer_sets[thread_number].size();
const int last_layer_index = layer_cnt - 1;
base_layer *layer;// = layer_sets[thread_number][last_layer_index];
// update hidden layers
// start at lower layer and push information up to previous layer
// handle dropout first
for (int k = last_layer_index; k >= 0; k--)
{
layer = layer_sets[thread_number][k];
// all the signals should be summed up to this layer by now, so we go through and take the grad of activiation
int nodes = layer->node.size();
// already did last layer, so skip it
if (k< last_layer_index)
for (int i = 0; i< nodes; i++)
layer->delta.x[i] *= layer->df(layer->node.x, i, nodes);
// now pass that signal upstream
__for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop
{
base_layer *p_top = link.second;
// note all the delta[connections[i].second] should have been calculated by time we get here
layer->distribute_delta(*p_top, *W[link.first]);
}
}
// update weights - shouldn't matter the direction we update these
// we can stay in backwards direction...
// it was not faster to combine distribute_delta and increment_w into the same loop
int size_W = (int)W.size();
dW_sets[my_batch_index].resize(size_W);
dbias_sets[my_batch_index].resize(layer_cnt);
for (int k = last_layer_index; k >= 0; k--)
{
layer = layer_sets[thread_number][k];
__for__(auto &link __in__ layer->backward_linked_layers)
{
base_layer *p_top = link.second;
int w_index = (int)link.first;
//if (dynamic_cast<max_pooling_layer*> (layer) != NULL) continue;
layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20%
// moved this out to sync_mini_batch();
//_solver->increment_w( W[w_index],w_index, dW_sets[_batch_index][w_index]); // -- 10%
}
// convolution layers do not store a per-node bias delta here
if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
dbias_sets[my_batch_index][k] = layer->delta;
}
// if all batches finished, update weights
lock_batch();
batch_open[my_batch_index] = BATCH_COMPLETE;
int next_index = get_next_open_batch();
if (next_index == BATCH_FILLED_COMPLETE) // all complete
sync_mini_batch(); // resets _batch_index to 0
unlock_batch();
}
// build the network-input buffer for one sample, applying the configured
// random augmentation (translate / flip / rotate / scale) when enabled.
// 'in' is copied, so the caller's buffer is never modified.
// NOTE(review): rand() is used for the random draws -- not thread safe or
// per-thread reproducible; confirm this is acceptable for training
mojo::matrix make_input(float *in, const int _thread_number)
{
mojo::matrix augmented_input;// = auto_augmentation();
std::vector<base_layer *> inputs;
int in_size = 0;
// collect all input layers and their total node count
__for__(auto layer __in__ layer_sets[_thread_number])
{
if (dynamic_cast<input_layer*> (layer) != NULL)
{
inputs.push_back(layer);
in_size += layer->node.size();
}
}
if (use_augmentation > 0)
{
augmented_input.resize(in_size, 1, 1);
// draw one set of augmentation parameters shared by all input layers
float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
bool flip_h = ((rand() % 2)*augment_h_flip) ? true: false;
bool flip_v = ((rand() % 2)*augment_v_flip) ? true: false;
int shift_x = (rand() % (augment_x * 2 + 1)) - augment_x;
int shift_y = (rand() % (augment_y * 2 + 1)) - augment_y;
int offset = 0;
__for__(auto layer __in__ inputs)
{
//memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
//in_ptr += layer->node.size();
// copy input to matrix type
mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset);
// only 2-D inputs are augmented; 1-D inputs are copied through unchanged
if (m.rows > 1 && m.cols > 1)
{
#if defined(MOJO_CV2) || defined(MOJO_CV3)
if ((augment_theta > 0 || augment_scale > 0))
m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
#endif
// NOTE(review): flip_v pairs with flip_cols here while train_class
// pairs the h-flip with flip_cols -- verify the intended orientation
if (flip_v)m = m.flip_cols();
if (flip_h) m = m.flip_rows();
mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad);
memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size());
offset += aug.size();
}
else
{
memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size());
offset += m.size();
}
}
// input = augmented_input.x;
}
else
{
// no augmentation: straight copy of the raw input
augmented_input.resize(in_size, 1, 1);
memcpy(augmented_input.x, in, sizeof(float)*in_size);
}
return augmented_input;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N C L A S S
//
// after starting epoch, call this to train against a class label
// label_index must be 0 to out_size()-1
// for thread safety, you must pass in the thread_index if calling from different threads
// after starting epoch, call this to train against a class label.
// label_index must be 0 to out_size()-1; for thread safety, pass the
// thread index when calling from different threads.
// returns true if a backprop pass ran; false if the sample was skipped
// (smart train) or no mini-batch slot was available.
bool train_class(float *in, int label_index, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("call allow_threads()");
const int thread_number = _thread_number;
/*
mojo::matrix augmented_input = make_input(in, thread_number);
/*/
float *input = in;
mojo::matrix augmented_input;
if (use_augmentation > 0)
{
//augment_h_flip = flip_h;
//augment_v_flip = flip_v;
// copy input to matrix type
mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in);
#if defined(MOJO_CV2) || defined(MOJO_CV3)
if (augment_theta > 0 || augment_scale > 0)
{
float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1+s);
}
#endif
if (augment_h_flip)
if ((rand() % 2) == 0)
m = m.flip_cols();
if (augment_v_flip)
if ((rand() % 2) == 0)
m = m.flip_rows();
augmented_input = m.shift((rand() % (augment_x * 2 + 1)) - augment_x, (rand() % (augment_y * 2 + 1)) - augment_y, augment_pad);
input = augmented_input.x;
}
//*/
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
forward(input, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
const int layer_delta_size = layer->delta.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
int max_j_out = 0;
int max_j_target = label_index;
// was passing this in, but may as well just create it on the fly
// a vector mapping the label index to the desired target output node values:
// 0 background / 1 target for sigmoid- and softmax-style outputs,
// -1 background / 1 target otherwise (e.g. tanh)
std::vector<float> target;
if((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)|| (std::string("brokemax").compare(layer->p_act->name) == 0))
target = std::vector<float>(layer_node_size, 0);
else
target = std::vector<float>(layer_node_size, -1);
if(label_index>=0 && label_index<layer_node_size) target[label_index] = 1;
//const float grad_fudge = 1.0f;
// because of numerator/demoninator cancellations which prevent a divide by zero issue,
// we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if(cost_activation_type>0)
layer->delta.x[j] = cost_activation_type*(layer->node.x[j]- target[j]);
else
layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
// pick best response
if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
// for better E maybe just look at 2 highest scores so zeros don't dominate
float f= mse::cost(layer->node.x[j], target[j]);
E += f;//mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
if ((max_j_target == max_j_out)) match = true;
update_smart_train(E, match);
// smart train: skip backprop when the sample is already correct with low error
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N T A R G E T
//
// after starting epoch, call this to train against a target vector
// for thread safety, you must pass in the thread_index if calling from different threads
// if positive=1, goal is to minimize the distance between in and target
// after starting epoch, call this to train against a target output vector.
// in:             input sample (network input size)
// target:         desired output vector (network output size)
// positive:       1 = minimize distance to target, 0 = maximize it
// _thread_number: pass the thread index when calling from different threads
// returns true if a backprop pass ran; false if the sample was skipped
// (smart train) or no mini-batch slot was available
bool train_target(float *in, float *target, int positive=1, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("need to enable OMP");
const int thread_number = _thread_number;
mojo::matrix augmented_input = make_input(in, thread_number);
float *input = augmented_input.x;
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
// (fix: pass the augmented input; previously 'in' was passed and the
// augmentation computed above was silently ignored)
forward(input, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
const float grad_fudge = 1.0f;
// because of numerator/denominator cancellations which prevent a divide by
// zero issue, we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("brokemax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if (positive) // want to minimize distance
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(layer->node.x[j] - target[j]);
else
layer->delta.x[j] = grad_fudge*_cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
}
else // want to maximize distance
{
// fix: use fabs() for float magnitude; plain abs() can resolve to the
// integer overload and truncate the fractional part
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(1.f - fabs(layer->node.x[j] - target[j]));
else
layer->delta.x[j] = grad_fudge*(1.f - fabs(_cost_function->d_cost(layer->node.x[j], target[j])))*layer->df(layer->node.x, j, layer_node_size);
}
// E is the mean squared error against the target (L2 distance x 2)
E += mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
// heuristic match criteria since there is no single class label here
if (E < 0.01 && positive) match = true;
else if (E > 0.1 && !positive) match = true;
update_smart_train(E, match);
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
// good enough already - release the batch slot and skip this sample
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
#else
// no-op stubs compiled when MOJO_NO_TRAINING is defined
// (keeps the public training API available without the training machinery)
float get_learning_rate() {return 0;}
void set_learning_rate(float alpha) {}
void train(float *in, float *target){}
void reset() {}
float get_smart_train_level() {return 0;}
void set_smart_train_level(float _level) {}
bool get_smart_train() { return false; }
void set_smart_train(bool _use) {}
#endif
};
}
|
GB_unop__log2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fp64_fp64)
// op(A') function: GB (_unop_tran__log2_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log2 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = log2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = log2 (Ax): apply the unary op entrywise (dense or bitmap A).
   Auto-generated SuiteSparse:GraphBLAS kernel -- do not hand-edit logic. */
GrB_Info GB (_unop_apply__log2_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* dense/full case: every entry is present */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = log2 (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = log2 (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__log2_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
/* the shared transpose template uses the GB_* macros defined above */
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
blake2sp.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <u.h>
#include <libc.h>
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialize a leaf state from P; the leaf's output length is taken from
   P->inner_length rather than P->digest_length (see comment above). */
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
int err = blake2s_init_param(S, P);
S->outlen = P->inner_length;
return err;
}
/* Set up leaf number 'offset' of the 8-way blake2sp tree.
   NOTE(review): store32 keeps only the low 32 bits of offset; with
   PARALLELISM_DEGREE leaves the offset is < 8, so nothing is lost. */
static int blake2sp_init_leaf( blake2s_state *S, u64int outlen, u64int keylen, u64int offset )
{
blake2s_param P[1];
P->digest_length = (u8int)outlen;
P->key_length = (u8int)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, offset );
store16( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2sp_init_leaf_param( S, P );
}
/* Set up the root node that hashes the 8 leaf digests (node_depth = 1). */
static int blake2sp_init_root( blake2s_state *S, u64int outlen, u64int keylen )
{
blake2s_param P[1];
P->digest_length = (u8int)outlen;
P->key_length = (u8int)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store16( &P->xof_length, 0 );
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
/* Initialize an unkeyed blake2sp state: one root state plus
   PARALLELISM_DEGREE (8) leaf states. Returns 0 on success, -1 on error. */
int blake2sp_init( blake2sp_state *S, u64int outlen )
{
u64int i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
/* the root and the final leaf are the last nodes of their levels */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
/* Initialize a keyed blake2sp state; each leaf first absorbs one
   zero-padded block containing the key.
   NOTE(review): the plain memset that burns the key block may be optimized
   away by the compiler -- a secure-wipe primitive would be safer; confirm
   what this platform provides. */
int blake2sp_init_key( blake2sp_state *S, u64int outlen, const void *key, u64int keylen )
{
u64int i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
u8int block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
memset(block, 0, BLAKE2S_BLOCKBYTES); /* Burn the key from stack */
}
return 0;
}
/* Absorb inlen bytes of input. Data is striped across the 8 leaves in
   BLAKE2S_BLOCKBYTES-sized blocks; a partial stripe is buffered in S->buf
   until enough data arrives or the hash is finalized. */
int blake2sp_update( blake2sp_state *S, const void *pin, u64int inlen )
{
const unsigned char * in = (const unsigned char *)pin;
u64int left = S->buflen;
u64int fill = sizeof( S->buf ) - left;
u64int i;
/* first, complete and flush the buffered stripe if possible */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* then feed whole stripes directly from the caller's buffer;
   with OpenMP each thread handles the leaf matching its thread id */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
u64int i = omp_get_thread_num();
#endif
u64int inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
/* buffer whatever did not fill a complete stripe */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finalize: flush buffered data to the leaves, finalize each leaf, feed
   the 8 leaf digests into the root, and write S->outlen bytes to out.
   Fails if out is nil or outlen is smaller than the configured digest. */
int blake2sp_final( blake2sp_state *S, void *out, u64int outlen )
{
u8int hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
u64int i;
if(out == nil || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
u64int left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, S->outlen );
}
/* One-shot blake2sp: hash inlen bytes of in (optionally keyed) into out.
   Equivalent to init(_key) + update + final. Returns 0 on success, -1 on
   invalid parameters or internal failure. */
int blake2sp( void *out, u64int outlen, const void *in, u64int inlen, const void *key, u64int keylen )
{
u8int hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
u64int i;
/* Verify parameters */
if ( nil == in && inlen > 0 ) return -1;
if ( nil == out ) return -1;
if ( nil == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
/* each leaf absorbs one zero-padded key block first */
u8int block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
memset( block, 0, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
/* stripe the input across the 8 leaves, one block per leaf per round */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
u64int i = omp_get_thread_num();
#endif
u64int inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
/* tail: this leaf's share of the final partial stripe */
if( inlen__ > i * BLAKE2S_BLOCKBYTES )
{
const u64int left = inlen__ - i * BLAKE2S_BLOCKBYTES;
const u64int len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[i], in__, len );
}
blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
}
/* combine the 8 leaf digests through the root node */
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: check the one-shot and streaming APIs against the keyed
   known-answer vectors from blake2-kat.h. Prints "ok" or "error". */
int main( void )
{
u8int key[BLAKE2S_KEYBYTES];
u8int buf[BLAKE2_KAT_LENGTH];
u64int i, step;
/* deterministic test key and message: 0,1,2,... */
for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( u8int )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( u8int )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
u8int hash[BLAKE2S_OUTBYTES];
blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API: feed each message in chunks of every size < one block */
for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
u8int hash[BLAKE2S_OUTBYTES];
blake2sp_state S;
u8int * p = buf;
u64int mlen = i;
int err = 0;
if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
goto fail;
}
}
}
print("ok\n");
exits(nil);
return 0;
fail:
print("error\n");
return -1;
}
#endif
|
bwabridge.c | #include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "util.h"
#include "bwabridge.h"
/***************************************************************************
* This is all translated from Lariat's BWA bridge. *
* https://github.com/10XGenomics/lariat/blob/master/go/src/gobwa/gobwa.go *
***************************************************************************/
extern mem_alnreg_v mem_align1_core(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, const uint8_t *pac, int l_seq, char *seq, void *buf);
extern mem_chain_v mem_chain(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, int len, const uint8_t *seq, void *buf);
extern mem_aln_t mem_reg2aln(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_seq, const char *seq, const mem_alnreg_t *ar);
extern int mem_matesw(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, const mem_pestat_t pes[4], const mem_alnreg_t *a, int l_ms, const uint8_t *ms, mem_alnreg_v *ma);
extern uint8_t *bns_fetch_seq(const bntseq_t *bns, const uint8_t *pac, int64_t *beg, int64_t mid, int64_t *end, int *rid);
/* Arena */
#define ARENA_INIT_CAP 100
typedef struct {
void **p;
size_t len;
size_t cap;
} Arena;
static Arena arena = {NULL, 0ULL, 0ULL};
#pragma omp threadprivate(arena)
/* Lazily set up this thread's pointer arena; safe to call repeatedly. */
void arena_init(void)
{
    if (arena.p != NULL)
        return;             /* already initialized for this thread */
    arena.p = safe_malloc(ARENA_INIT_CAP * sizeof(void *));
    arena.len = 0;
    arena.cap = ARENA_INIT_CAP;
}
#pragma omp threadprivate(arena)
/* Free every pointer recorded in the arena and reset it to empty
 * (the backing pointer array itself is kept for reuse). */
void arena_clear(void)
{
    size_t k;
    for (k = 0; k < arena.len; k++) {
        free(arena.p[k]);
    }
    arena.len = 0;
}
#pragma omp threadprivate(arena)
/* Release all arena-held pointers plus the arena's own storage,
 * returning it to its pristine (uninitialized) state. */
void arena_destroy(void)
{
    arena_clear();
    free(arena.p);
    arena.p = NULL;
    arena.len = 0;
    arena.cap = 0;
}
#pragma omp threadprivate(arena)
/* Record heap pointer p so it is freed on the next arena_clear(). */
void arena_push(void *p)
{
    if (arena.len == arena.cap) {
        /* grow by ~1.5x; the +1 also covers a zero capacity */
        const size_t grown = arena.len + arena.len / 2 + 1;
        arena.p = safe_realloc(arena.p, grown * sizeof(void *));
        arena.cap = grown;
    }
    arena.p[arena.len] = p;
    arena.len++;
}
/* Convenience wrappers */
char **contig_ids;
bwaidx_t *load_reference(const char *path)
{
bwaidx_t *ref = bwa_idx_load(path, BWA_IDX_ALL);
if (ref == NULL) {
fprintf(stderr, "error: could not load reference at %s\n", path);
exit(EXIT_FAILURE);
}
bntseq_t *contigs = ref->bns;
int32_t n_contigs = contigs->n_seqs;
contig_ids = safe_malloc((n_contigs + 1) * sizeof(*contig_ids));
for (int32_t i = 0; i < n_contigs; i++) {
contig_ids[i] = contigs->anns[i].name;
}
contig_ids[n_contigs] = NULL;
return ref;
}
/* Linear lookup of chrom in the NULL-terminated contig_ids table;
 * returns its index, or -1 if the name is unknown. */
static int32_t get_contig_id(const char *chrom)
{
    int32_t k = 0;
    while (contig_ids[k] != NULL) {
        if (strcmp(contig_ids[k], chrom) == 0)
            return k;
        k++;
    }
    return -1;
}
/*
 * Copy the reference bases of chrom:[start,end) into out as ASCII.
 * When rev is nonzero the reverse complement is written instead.
 * out must hold (end - start) bases plus a NUL terminator.
 * NOTE: bns_fetch_seq may adjust the begin/end coordinates (it returns
 * them through pointers), so the actual length is recomputed afterwards.
 */
void get_seq(bwaidx_t *ref, char *chrom, int64_t start, int64_t end, int rev, char *out)
{
    static const char fwd_base[]  = {'A', 'C', 'G', 'T'};
    static const char comp_base[] = {'T', 'G', 'C', 'A'};
    bntseq_t *contigs = ref->bns;
    int32_t contig_id = get_contig_id(chrom);
    assert(contig_id >= 0);
    const int64_t off = contigs->anns[contig_id].offset;
    int64_t lo = start + off;   /* coordinates in the packed reference */
    int64_t hi = end + off;
    uint8_t *codes = bns_fetch_seq(ref->bns, ref->pac, &lo, (lo + hi) >> 1, &hi, &contig_id);
    const size_t n = hi - lo;
    if (rev) {
        for (size_t k = 0; k < n; k++)
            out[n - 1 - k] = comp_base[codes[k]];
    } else {
        for (size_t k = 0; k < n; k++)
            out[k] = fwd_base[codes[k]];
    }
    out[n] = '\0';
    free(codes);
}
/*
 * Build a caller-owned array of {name, len} records, one per contig in
 * the loaded index.  Names alias the index annotations; the array itself
 * must be freed by the caller.
 */
ContigInfo *get_ref_contigs_info(bwaidx_t *ref)
{
    bntseq_t *contigs = ref->bns;
    const int32_t n = contigs->n_seqs;
    ContigInfo *info = safe_malloc(n * sizeof(*info));
    for (int32_t k = 0; k < n; k++) {
        bntann1_t *ann = &contigs->anns[k];
        info[k] = (ContigInfo){ .name = ann->name, .len = ann->len };
    }
    return info;
}
/*
 * Translate an ASCII nucleotide string into bwa's 2-bit codes via
 * nst_nt4_table.  Returns a heap buffer of exactly len bytes (NOT
 * NUL-terminated -- callers always pass the length alongside).
 * NOTE(review): the (int) cast assumes seq holds plain ASCII; a negative
 * char would index the table out of bounds -- confirm inputs are clean.
 */
static char *seq_convert(const char *seq, const size_t len)
{
    char *enc = safe_malloc(len * sizeof(*enc));
    size_t k = 0;
    while (k < len) {
        enc[k] = nst_nt4_table[(int)seq[k]];
        k++;
    }
    return enc;
}
/*
 * Mirror of bwa's mem_aln_t in which the anonymous bitfield word
 * (is_rev:1, is_alt:1, mapq:8, NM:22) is replaced by a plain uint32_t
 * (flag2), so the packed bits can be read with masks/shifts by
 * interpret_single_read_alignment() after a pointer cast.
 * NOTE(review): the layout must stay in exact sync with bwa's mem_aln_t;
 * verify whenever the bundled bwa version changes.
 */
typedef struct {
    int64_t pos;      // forward strand 5'-end mapping position
    int rid;          // reference sequence index in bntseq_t; <0 for unmapped
    int flag;         // extra flag
    uint32_t flag2;   // packed (LSB first): is_rev:1, is_alt:1, mapq:8, NM:22
    int n_cigar;      // number of CIGAR operations
    uint32_t *cigar;  // CIGAR in BAM encoding: opLen<<4|op; MIDSH => 0..4
    char *XA;         // alternative mappings
    int score, sub, alt_sc;
} mem_aln_compact_flag_t;
/*
 * Align seq with bwa-mem and wrap each raw hit as an EasyAlignment.
 * The returned array is terminated by an entry with offset == -1.
 * The raw mem_alnreg_t buffer is handed to the arena (freed on
 * arena_clear); the wrapper array is not arena-pushed here.
 */
EasyAlignment *bwa_align(bwaidx_t *ref, mem_opt_t *opts, char *seq, const size_t len)
{
    char *enc = seq_convert(seq, len);
    mem_alnreg_v regs = mem_align1_core(opts, ref->bwt, ref->bns, ref->pac, len, enc, NULL);
    /* +1 slot for the terminator entry */
    EasyAlignment *out = safe_malloc((regs.n + 1) * sizeof(*out));
    arena_push(regs.a);
    for (size_t k = 0; k < regs.n; k++) {
        interpret_align(ref, &regs.a[k], &out[k]);
    }
    out[regs.n].offset = -1;   /* sentinel */
    free(enc);
    return out;
}
/*
 * Run only bwa-mem's seeding/chaining stage on seq and wrap each raw
 * mem_chain_t as a bridge-level Chain.
 * NOTE(review): unlike bwa_align() the returned array has no sentinel
 * and the count (results.n) is not reported to the caller -- confirm
 * callers learn the length some other way.
 * NOTE(review): results.a is neither freed nor arena_push()ed; the Chain
 * entries keep pointers into it (res->chain), so it must outlive them,
 * but as written it is never released -- possible leak, verify.
 */
Chain *bwa_chain(bwaidx_t *ref, mem_opt_t *opts, char *seq, const size_t len)
{
    /* 2-bit encode the read for the chaining core */
    uint8_t *seq_conv = (uint8_t *)seq_convert(seq, len);
    mem_chain_v results = mem_chain(opts, ref->bwt, ref->bns, len, seq_conv, NULL);
    Chain *chains = safe_malloc(results.n * sizeof(*chains));
    for (size_t i = 0; i < results.n; i++) {
        mem_chain_t *a = &results.a[i];
        interpret_chain(ref, a, &chains[i]);
    }
    free(seq_conv);
    return chains;
}
/*
 * Align both reads of a pair, then perform mate-rescue Smith-Waterman:
 * for up to 50 near-best hits of each read (within score_delta of that
 * read's best score), mem_matesw() tries to place the mate near the hit,
 * which may append new alignments to the OTHER read's result vector.
 * Returns both EasyAlignment arrays and their lengths.  The arrays and
 * the underlying mem_alnreg_t buffers are arena-owned (released on
 * arena_clear); callers must not free them.
 */
EasyAlignmentPairs bwa_mem_mate_sw(bwaidx_t *ref,
                                   mem_opt_t *opts,
                                   char *read1,
                                   const size_t len1,
                                   char *read2,
                                   const size_t len2,
                                   const int score_delta)
{
#define N_PES 4
    /* Hard-coded paired-end insert statistics, one entry per read-pair
     * orientation; only entry 1 is marked usable (failed == 0).
     * NOTE(review): this lazy one-time init is unsynchronized; if this
     * function can run concurrently (the arena in this file is OpenMP
     * threadprivate, suggesting threaded callers), two threads could race
     * on `init` -- confirm call sites are serialized or guard the init. */
    static int init = 0;
    static mem_pestat_t pes[N_PES];
    if (!init) {
        pes[0].failed = 1;
        pes[1].failed = 0;
        pes[2].failed = 1;
        pes[3].failed = 1;
        for (int i = 0; i < N_PES; i++) {
            pes[i].low = -35;
            pes[i].high = 500;
            pes[i].avg = 200.0;
            pes[i].std = 100.0;
        }
        init = 1;
    }
#undef N_PES
    /* 2-bit encode both reads and align each one independently. */
    char *seq1_conv = seq_convert(read1, len1);
    char *seq2_conv = seq_convert(read2, len2);
    mem_alnreg_v results1 = mem_align1_core(opts, ref->bwt, ref->bns, ref->pac, len1, seq1_conv, NULL);
    mem_alnreg_v results2 = mem_align1_core(opts, ref->bwt, ref->bns, ref->pac, len2, seq2_conv, NULL);
    EasyAlignment *aligns1 = safe_malloc(results1.n * sizeof(*aligns1));
    EasyAlignment *aligns2 = safe_malloc(results2.n * sizeof(*aligns2));
    /* Interpret raw hits and track each read's best score. */
    int best_score1 = 0;
    int best_score2 = 0;
    for (size_t i = 0; i < results1.n; i++) {
        mem_alnreg_t *a = &results1.a[i];
        EasyAlignment *p = &aligns1[i];
        interpret_align(ref, a, p);
        if (p->score > best_score1)
            best_score1 = p->score;
    }
    for (size_t i = 0; i < results2.n; i++) {
        mem_alnreg_t *a = &results2.a[i];
        EasyAlignment *p = &aligns2[i];
        interpret_align(ref, a, p);
        if (p->score > best_score2)
            best_score2 = p->score;
    }
    /* Mate rescue for read 1: seed from read 2's near-best hits.
     * mem_matesw may grow (and realloc) results1.a. */
    int num = 0;
    for (size_t i = 0; i < results2.n && num < 50; i++) {
        if (aligns2[i].score >= best_score2 - score_delta) {
            ++num;
            mem_matesw(opts, ref->bns, ref->pac, &pes[0], aligns2[i].chained_hit, len1, (uint8_t *)seq1_conv, &results1);
        }
    }
    /* results1 may have grown: resize aligns1 and re-interpret ALL hits so
     * every chained_hit points into the current results1.a buffer. */
    aligns1 = safe_realloc(aligns1, results1.n * sizeof(*aligns1));
    for (size_t i = 0; i < results1.n; i++) {
        mem_alnreg_t *a = &results1.a[i];
        interpret_align(ref, a, &aligns1[i]);
    }
    /* Mate rescue for read 2, symmetric to the loop above. */
    num = 0;
    for (size_t i = 0; i < results1.n && num < 50; i++) {
        if (aligns1[i].score >= best_score1 - score_delta) {
            ++num;
            mem_matesw(opts, ref->bns, ref->pac, &pes[0], aligns1[i].chained_hit, len2, (uint8_t *)seq2_conv, &results2);
        }
    }
    /* Hand the final raw hit buffers to the arena for deferred release. */
    arena_push(results1.a);
    arena_push(results2.a);
    /* results2 may have grown too: resize and re-interpret its wrappers. */
    aligns2 = safe_realloc(aligns2, results2.n * sizeof(*aligns2));
    for (size_t i = 0; i < results2.n; i++) {
        mem_alnreg_t *a = &results2.a[i];
        interpret_align(ref, a, &aligns2[i]);
    }
    free(seq1_conv);
    free(seq2_conv);
    /* The wrapper arrays are arena-owned as well. */
    arena_push(aligns1);
    arena_push(aligns2);
    return (EasyAlignmentPairs){aligns1, aligns2, results1.n, results2.n};
}
/*
 * Re-align one chained hit with mem_reg2aln (banded Smith-Waterman /
 * CIGAR generation) and unpack the result into *res.  The cigar and XA
 * heap blocks produced by bwa are handed to the arena for deferred free.
 */
void bwa_smith_waterman(bwaidx_t *ref, mem_opt_t *opts, char *seq, const size_t len, mem_alnreg_t *aln, SingleReadAlignment *res)
{
    char *enc = seq_convert(seq, len);
    mem_aln_t raw = mem_reg2aln(opts, ref->bns, ref->pac, len, enc, aln);
    arena_push(raw.cigar);
    arena_push(raw.XA);
    interpret_single_read_alignment(ref, &raw, res);
    free(enc);
}
/*
 * Convert a raw mem_alnreg_t hit into an EasyAlignment with
 * contig-relative coordinates.  bwa addresses the packed reference as
 * forward strand followed by its reverse complement (2 * l_pac positions
 * in total); positions at or beyond l_pac are folded back onto the
 * forward strand and flagged rev = 1.
 */
void interpret_align(bwaidx_t *ref, mem_alnreg_t *caln, EasyAlignment *res)
{
    bntseq_t *contigs = ref->bns;
    bntann1_t *ann = &contigs->anns[caln->rid];
    const int64_t off = ann->offset;
    const int64_t flip = contigs->l_pac * 2 - 1;   /* mirror for reverse hits */
    if (caln->rb < contigs->l_pac) {
        res->rev = 0;
        res->offset = caln->rb - off;
    } else {
        res->rev = 1;
        res->offset = flip - caln->rb - off;
    }
    res->aln_end = (caln->re < contigs->l_pac)
                       ? caln->re - off
                       : flip - caln->re - off;
    res->contig = ann->name;
    /* secondary if bwa chained it under another hit, either way */
    res->sec = (caln->secondary >= 0 || caln->secondary_all > 0) ? 1 : 0;
    res->chained_hit = caln;
    res->score = caln->score;
    res->read_s = caln->qb;
    res->read_e = caln->qe;
}
/*
 * Convert a raw mem_chain_t into a bridge-level Chain with a
 * contig-relative offset, folding reverse-complement coordinates
 * (pos >= l_pac) back onto the forward strand (rev = 1).
 */
void interpret_chain(bwaidx_t *ref, mem_chain_t *chn, Chain *res)
{
    bntseq_t *contigs = ref->bns;
    bntann1_t *ann = &contigs->anns[chn->rid];
    const int64_t flip = contigs->l_pac * 2 - 1;
    if (chn->pos < contigs->l_pac) {
        res->rev = 0;
        res->offset = chn->pos - ann->offset;
    } else {
        res->rev = 1;
        res->offset = flip - chn->pos - ann->offset;
    }
    res->contig = ann->name;
    res->chain = chn;   /* keeps a pointer into the caller's chain vector */
}
/*
 * Unpack bwa's mem_aln_t into the bridge's SingleReadAlignment.
 * mem_aln_t stores is_rev/is_alt/mapq/NM in a C bitfield word; this code
 * reinterprets the struct as mem_aln_compact_flag_t (whose uint32_t
 * flag2 overlays that word) and extracts the fields with masks/shifts.
 * NOTE(review): this assumes the compiler packs the bitfield LSB-first as
 * is_rev:1, is_alt:1, mapq:8, NM:22, and the cast is a strict-aliasing
 * violation in principle -- it works with the bwa build used here, but
 * verify if the compiler, flags, or bwa version change.
 */
void interpret_single_read_alignment(bwaidx_t *ref, mem_aln_t *aln, SingleReadAlignment *res)
{
    const int fixed_flags = ((mem_aln_compact_flag_t *)aln)->flag2;
    bntseq_t *contigs = ref->bns;
    const int contig_id = aln->rid;
    bntann1_t *contig = &contigs->anns[contig_id];
    res->pos = aln->pos;
    res->chrom = contig->name;
    res->cigar = aln->cigar;
    res->n_cigar = aln->n_cigar;
    res->alt = (fixed_flags & 0x2) >> 1;      /* bit 1: is_alt */
    res->mapq = (fixed_flags & 0x3fc) >> 2;   /* bits 2..9: mapq */
    res->rev = fixed_flags & 0x1;             /* bit 0: is_rev */
    res->score = aln->score;
    res->sub = aln->sub;
    res->edit_dist = fixed_flags >> 10;       /* bits 10..31: NM (edit distance) */
    res->alt_sc = aln->alt_sc;
}
|
GxB_Monoid_identity.c | //------------------------------------------------------------------------------
// GxB_Monoid_identity: return the identity of a monoid
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Monoid_identity        // return the monoid identity
(
    void *identity,                 // output: receives the identity value;
                                    // must have room for the monoid's
                                    // ztype->size bytes
    GrB_Monoid monoid               // monoid to query
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_Monoid_identity (&identity, monoid)") ;
    GB_RETURN_IF_NULL (identity) ;
    GB_RETURN_IF_NULL_OR_FAULTY (monoid) ;
    ASSERT_MONOID_OK (monoid, "monoid for identity", GB0) ;

    //--------------------------------------------------------------------------
    // return the identity
    //--------------------------------------------------------------------------

    // raw byte copy; the caller is responsible for interpreting the bytes
    // according to the monoid's ztype
    memcpy (identity, monoid->identity, monoid->op->ztype->size) ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <omp.h>
#include "color.h"
#include "types.h"
#include "mandel.h"
#include "image_utils.h"
#ifndef _SCHELL_
#define _SCHELL_ guided
#endif
int main(int argc, char *argv[]) {
int block_size;
int ix, iy;
int progress;
int total, i, j;
int *escapetime;
int *hist;
srand(time(NULL));
_config config;
_color *bitmap, *pal;
config.screenx = 1920;
config.screeny = 1080;
config.screenx = 800;
config.screeny = 600;
config.bailout = 2500;
config.er = 2;
config.aa = 1;
config.minx = -2.5;
config.maxx = 1.5;
config.miny = -2.0;
config.maxy = 2.0;
config.minx = -0.7436431355 - 0.000014628;
config.maxx = -0.7436431355 + 0.000014628;
config.miny = 0.131825963 - 0.000014628;
config.maxy = 0.131825963 + 0.000014628;
/*config.minx = -0.743643887037151 - 0.000000000051299 / 256.0;*/
/*config.maxx = -0.743643887037151 + 0.000000000051299 / 256.0;*/
/*config.miny = 0.131825904205330 - 0.000000000051299 / 256.0;*/
/*config.maxy = 0.131825904205330 + 0.000000000051299 / 256.0;*/
/*config.minx = -0.743643887037151 - 0.000000000051299 / 1024.0;*/
/*config.maxx = -0.743643887037151 + 0.000000000051299 / 1024.0;*/
/*config.miny = 0.131825904205330 - 0.000000000051299 / 1024.0;*/
/*config.maxy = 0.131825904205330 + 0.000000000051299 / 1024.0;*/
block_size = 10;
progress = 0;
total = 0;
if ( argc > 1 ) {
omp_set_num_threads(atoi(argv[1]));
}
hist = ( int * ) malloc ( sizeof ( int ) * config.bailout );
escapetime = ( int * ) malloc ( sizeof ( int ) * config.screenx * config.screeny );
bitmap = ( _color* ) malloc ( sizeof ( _color ) * config.screenx * config.screeny );
pal = ( _color* ) malloc ( sizeof ( _color ) * 255 );
populatePal ( pal ) ;
bzero( hist, sizeof ( int ) * config.bailout );
printf("%f \t %f\n%f\t %f\n", config.minx, config.maxx, config.miny, config.maxy);
int bs = 10;
#pragma omp parallel for private(i,j) schedule(_SCHELL_)
for (i = 0; i < config.screenx; i += bs) {
for (j = 0; j < config.screeny; j += bs) {
do_block(i, i + bs, j, j + bs, config, escapetime);
}
fprintf(stderr," -- %.2f%%\n",((progress += bs)/((double)config.screenx))*100.0);
}
progress = 0;
block_size = 3;
#pragma omp parallel for private(i, j) shared(progress) schedule(_SCHELL_)
for (i = 0; i < config.screenx/block_size; ++i) {
for (j = 0; j < config.screeny/block_size; ++j) {
int x, y, dx, dy;
/*dx = 10 + rand()%5; //rand() % 20 - 10;*/
/*dy = 10 + rand()%5; //rand() % 20 - 10;*/
dx = rand() % 20 - 10;
dy = rand() % 20 - 10;
x = rand() % (config.screenx - dx);
y = rand() % (config.screeny - dy);
draw_line(x, x + dx, y, y + dy, config, escapetime);
}
fprintf(stderr," -- %.2f%%\n",((progress++)/((double)config.screenx/block_size))*100.0);
}
/*draw_line(0, config.screenx, 0, config.screeny, config, escapetime);*/
/*#pragma omp parallel for private(ix) schedule(_SCHELL_)*/
/*for (i = 0; i < config.screenx/block_size; ++i) {*/
/*for (j = 0; j < config.screeny/block_size; ++j) {*/
/*int x, y, dx, dy;*/
/*[>if ( rand() % 2 == 0 ) {<]*/
/*[>dx = rand() % 20;<]*/
/*[>dy = rand() % 10;<]*/
/*[>} else {<]*/
/*dx = rand() % 5 + 1;*/
/*dy = rand() % 5 + 1;*/
/*[>}<]*/
/*x = rand() % (config.screenx - dx);*/
/*y = rand() % (config.screeny - dy);*/
/*do_block(x , x + dx, y , y + dy, config, escapetime);*/
/*}*/
/*fprintf(stderr," -- %.2f%%\n",((progress++)/((double)config.screenx/block_size))*100.0);*/
/*}*/
int max = 0;
for ( iy = 0; iy < config.screeny; iy++ ) {
for ( ix = 0; ix < config.screenx; ix++ ) {
hist[escapetime[iy * config.screenx + ix]]++;
max = max < escapetime[iy * config.screenx + ix] ? escapetime[iy * config.screenx + ix] : max;
}
}
for ( i = 0; i < config.bailout; ++i) {
total += hist[i];
}
for ( i = 0; i < config.bailout - 1; ++i) {
hist[i] += hist[i-1];
}
for ( iy = 0; iy < config.screeny; iy++ ) {
for ( ix = 0; ix < config.screenx; ix++ ) {
if ( escapetime[iy * config.screenx + ix] == 0 ) {
} else {
/*bitmap[iy * config.screenx + ix].r = 255;*/
/*bitmap[iy * config.screenx + ix].g = 255;*/
/*bitmap[iy * config.screenx + ix].b = 255;*/
bitmap[iy * config.screenx + ix] = getPalMem(hist[escapetime[iy * config.screenx + ix]]/(double)total, pal);
}
/*bitmap[iy * config.screenx + ix].r = (int)((escapetime[iy * config.screenx + ix] / (double)max) * 255.0);*/
/*bitmap[iy * config.screenx + ix].g = (int)((escapetime[iy * config.screenx + ix] / (double)max) * 255.0);*/
/*bitmap[iy * config.screenx + ix].b = (int)((escapetime[iy * config.screenx + ix] / (double)max) * 255.0);*/
/*bitmap[iy * config.screenx + ix].r = escapetime[iy * config.screenx + ix];*/
/*bitmap[iy * config.screenx + ix].g = escapetime[iy * config.screenx + ix];*/
/*bitmap[iy * config.screenx + ix].b = escapetime[iy * config.screenx + ix];*/
}
}
fprintf(stderr," -- %.2f%%\n",100.0);
fprintf(stderr," <---- DONE ---->\n");
fprintf(stderr," Writing to disk!\n");
save_png_to_file(bitmap, config.screenx, config.screeny, "mandel.png");
free(escapetime);
fprintf(stderr," -- Bye\n");
return EXIT_SUCCESS;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.